annotate src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 2125:7246a374a9f2

6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
Summary: Make JvmtiGCMark safe to run non-safepoint and instrument CMS
Reviewed-by: ysr, dcubed

author:   kamg
date:     Mon, 10 Jan 2011 17:14:53 -0500
parents:  f95d63e2154a
children: c5a923563727
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
int CompactibleFreeListSpace::IndexSetStart = 0;
int CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");
  #define numQuanta(x,y) ((x+y-1)/y)
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart = MinObjAlignment;
  IndexSetStride = MinObjAlignment;
}

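// A worked example of set_cms_values() with illustrative (not build-specific)
// numbers: if sizeof(FreeChunk) were 24 bytes and MinObjAlignmentInBytes 8,
// then numQuanta(24, 8) == 3 and, with MinObjAlignment == 1 word,
// MinChunkSize would be 3 words; IndexSetStart and IndexSetStride would both
// be 1, so every slot of the indexed free list array would be in use.
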
// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size() ,
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1,  // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}
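
// A rough sketch of how the two structures initialized above split the work
// (the size used here is purely illustrative): a free chunk smaller than
// IndexSetSize words, say 17 words, lives on the exact-size list
// _indexedFreeList[17], while chunks of IndexSetSize words or more are kept
// in _dictionary, the size-ordered BinaryTreeDictionary.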

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this", so adjust the size again).
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}
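
// Illustration of the residual-fragment check in forward() above, with
// made-up numbers: if compaction_max_size is 10 words, adjusted_size is 8
// and MinChunkSize is 3, then 8 + 3 > 10 while 8 != 10, so compaction
// switches to the next space rather than leave a 2-word sliver that could
// not be represented as a FreeChunk.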

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize ) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size() ,
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists.  Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}

#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}
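
// Note on the loop bound above: keeping the test as i >= res + IndexSetStride
// guarantees that after the update i -= IndexSetStride we still have i >= res,
// so the unsigned index can never wrap below zero -- e.g. with res == 0 and
// IndexSetStride == 2, the loop stops before i could be driven past 0.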

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  FreeList::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                             fc, (HeapWord*)fc + i,
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->reportStatistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("=========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                \
  virtual void walk_mem_region_with_cl(MemRegion mr,             \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);         \
      void walk_mem_region_with_cl_par(MemRegion mr,             \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);         \
    void walk_mem_region_with_cl_nopar(MemRegion mr,             \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                 HeapWord* bottom,     \
                                                 HeapWord* top,        \
                                                 ClosureType* cl) {    \
  if (SharedHeap::heap()->n_par_threads() > 0) {                       \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                  \
  } else {                                                             \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                \
  }                                                                    \
}                                                                      \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,    \
                                                      HeapWord* bottom, \
                                                      HeapWord* top,   \
                                                      ClosureType* cl) { \
  /* Skip parts that are before "mr", in case "block_start" sent us    \
     back too far. */                                                  \
  HeapWord* mr_start = mr.start();                                     \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
  HeapWord* next = bottom + bot_size;                                  \
  while (next < mr_start) {                                            \
    bottom = next;                                                     \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);    \
    next = bottom + bot_size;                                          \
  }                                                                    \
                                                                       \
  while (bottom < top) {                                               \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&       \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
                    oop(bottom)) &&                                    \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {         \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);               \
      bottom += _cfls->adjustObjectSize(word_sz);                      \
    } else {                                                           \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);   \
    }                                                                  \
  }                                                                    \
}                                                                      \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,  \
                                                        HeapWord* bottom, \
                                                        HeapWord* top, \
                                                        ClosureType* cl) { \
  /* Skip parts that are before "mr", in case "block_start" sent us    \
     back too far. */                                                  \
  HeapWord* mr_start = mr.start();                                     \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
  HeapWord* next = bottom + bot_size;                                  \
  while (next < mr_start) {                                            \
    bottom = next;                                                     \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
    next = bottom + bot_size;                                          \
  }                                                                    \
                                                                       \
  while (bottom < top) {                                               \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
                    oop(bottom)) &&                                    \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {         \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);               \
      bottom += _cfls->adjustObjectSize(word_sz);                      \
    } else {                                                           \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
    }                                                                  \
  }                                                                    \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
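
// A note on the pattern above: spelling the receiver's class explicitly, as in
//   _cfls->CompactibleFreeListSpace::block_size(bottom);
// forces a statically bound (non-virtual) call in C++, which is what the
// "de-virtualize" comment refers to -- the compiler can skip the vtable
// dispatch because the target is named exactly.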

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}
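
// Sketch of the iteration protocol used by the two walkers above: each call
// to cl->do_blk(cur) (or do_blk_careful(cur)) returns a size in HeapWords,
// and that return value is what advances the cursor from one block to the
// next, so the closure is responsible for reporting how much it consumed.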

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the space \intersect memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

517 | 865 // Apply the given closure to each live object in the space |
866 // The usage of CompactibleFreeListSpace |
867 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
868 // objects in the space to hold references to objects that are no longer |
869 // valid. For example, an object may reference another object |
870 // that has already been swept up (collected). This method uses |
871 // obj_is_alive() to determine whether it is safe to apply the closure to |
872 // an object. See obj_is_alive() for details on how liveness of an |
873 // object is decided. |
874 |
875 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
876 assert_lock_strong(freelistLock()); |
877 NOT_PRODUCT(verify_objects_initialized()); |
878 HeapWord *cur, *limit; |
879 size_t curSize; |
880 for (cur = bottom(), limit = end(); cur < limit; |
881 cur += curSize) { |
882 curSize = block_size(cur); |
883 if (block_is_obj(cur) && obj_is_alive(cur)) { |
884 blk->do_object(oop(cur)); |
885 } |
886 } |
887 } |
888 |
0 | 889 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
890 UpwardsObjectClosure* cl) { | |
1145 | 891 assert_locked(freelistLock()); |
0 | 892 NOT_PRODUCT(verify_objects_initialized()); |
893 Space::object_iterate_mem(mr, cl); | |
894 } | |
895 | |
896 // Callers of this iterator beware: The closure application should | |
897 // be robust in the face of uninitialized objects and should (always) | |
898 // return a correct size so that the next addr + size below gives us a | |
899 // valid block boundary. [See for instance, | |
900 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
901 // in ConcurrentMarkSweepGeneration.cpp.] | |
902 HeapWord* | |
903 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | |
904 assert_lock_strong(freelistLock()); | |
905 HeapWord *addr, *last; | |
906 size_t size; | |
907 for (addr = bottom(), last = end(); | |
908 addr < last; addr += size) { | |
909 FreeChunk* fc = (FreeChunk*)addr; | |
910 if (fc->isFree()) { | |
911 // Since we hold the free list lock, which protects direct | |
912 // allocation in this generation by mutators, a free object | |
913 // will remain free throughout this iteration code. | |
914 size = fc->size(); | |
915 } else { | |
916 // Note that the object need not necessarily be initialized, | |
917 // because (for instance) the free list lock does NOT protect | |
918 // object initialization. The closure application below must | |
919 // therefore be correct in the face of uninitialized objects. | |
920 size = cl->do_object_careful(oop(addr)); | |
921 if (size == 0) { | |
922 // An unparsable object found. Signal early termination. | |
923 return addr; | |
924 } | |
925 } | |
926 } | |
927 return NULL; | |
928 } | |
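// Illustrative calling pattern (a sketch, not code from this file): per the
// comment above, a NULL result means the whole space was scanned, while a
// non-NULL result is the address of the unparsable block at which scanning
// stopped, so the caller can resume from that address once the object has
// been initialized, e.g. via object_iterate_careful_m() below.
//
//   HeapWord* stop = space->object_iterate_careful(&careful_cl);
//   if (stop != NULL) {
//     // remember 'stop'; a later pass can rescan MemRegion(stop, space->end())
//   }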
929 | |
930 // Callers of this iterator beware: The closure application should | |
931 // be robust in the face of uninitialized objects and should (always) | |
932 // return a correct size so that the next addr + size below gives us a | |
933 // valid block boundary. [See for instance, | |
934 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
935 // in ConcurrentMarkSweepGeneration.cpp.] | |
936 HeapWord* | |
937 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, | |
938 ObjectClosureCareful* cl) { | |
939 assert_lock_strong(freelistLock()); | |
940 // Can't use used_region() below because it may not necessarily | |
941 // be the same as [bottom(),end()); although we could | |
942 // use [used_region().start(),round_to(used_region().end(),CardSize)), | |
943 // that appears too cumbersome, so we just do the simpler check | |
944 // in the assertion below. | |
945 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), | |
946 "mr should be non-empty and within used space"); | |
947 HeapWord *addr, *end; | |
948 size_t size; | |
949 for (addr = block_start_careful(mr.start()), end = mr.end(); | |
950 addr < end; addr += size) { | |
951 FreeChunk* fc = (FreeChunk*)addr; | |
952 if (fc->isFree()) { | |
953 // Since we hold the free list lock, which protects direct | |
954 // allocation in this generation by mutators, a free object | |
955 // will remain free throughout this iteration code. | |
956 size = fc->size(); | |
957 } else { | |
958 // Note that the object need not necessarily be initialized, | |
959 // because (for instance) the free list lock does NOT protect | |
960 // object initialization. The closure application below must | |
961 // therefore be correct in the face of uninitialized objects. | |
962 size = cl->do_object_careful_m(oop(addr), mr); | |
963 if (size == 0) { | |
964 // An unparsable object found. Signal early termination. | |
965 return addr; | |
966 } | |
967 } | |
968 } | |
969 return NULL; | |
970 } | |
971 | |
972 | |
342 | 973 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const { |
0 | 974 NOT_PRODUCT(verify_objects_initialized()); |
975 return _bt.block_start(p); | |
976 } | |
977 | |
978 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { | |
979 return _bt.block_start_careful(p); | |
980 } | |
981 | |
982 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { | |
983 NOT_PRODUCT(verify_objects_initialized()); | |
984 // This must be volatile, or else there is a danger that the compiler | |
985 // will compile the code below into a sometimes-infinite loop, by keeping | |
986 // the value read the first time in a register. | |
987 while (true) { | |
988 // We must do this until we get a consistent view of the object. | |
187 | 989 if (FreeChunk::indicatesFreeChunk(p)) { |
990 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
991 size_t res = fc->size(); | |
992 // If the object is still a free chunk, return the size, else it | |
993 // has been allocated so try again. | |
994 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 995 assert(res != 0, "Block size should not be 0"); |
996 return res; | |
997 } | |
187 | 998 } else { |
999 // must read from what 'p' points to in each loop. | |
1000 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
1001 if (k != NULL) { | |
1716 | 1002 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop"); |
187 | 1003 oop o = (oop)p; |
1004 assert(o->is_parsable(), "Should be parsable"); | |
1005 assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); | |
1006 size_t res = o->size_given_klass(k->klass_part()); | |
1007 res = adjustObjectSize(res); | |
1008 assert(res != 0, "Block size should not be 0"); | |
1009 return res; | |
1010 } | |
0 | 1011 } |
1012 } | |
1013 } | |
1014 | |
1015 // A variant of the above that uses the Printezis bits for | |
1016 // unparsable but allocated objects. This avoids any possible | |
1017 // stalls waiting for mutators to initialize objects, and is | |
1018 // thus potentially faster than the variant above. However, | |
1019 // this variant may return a zero size for a block that is | |
1020 // under mutation and for which a consistent size cannot be | |
1021 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). | |
1022 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, | |
1023 const CMSCollector* c) | |
1024 const { | |
1025 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1026 // This must be volatile, or else there is a danger that the compiler | |
1027 // will compile the code below into a sometimes-infinite loop, by keeping | |
1028 // the value read the first time in a register. | |
1029 DEBUG_ONLY(uint loops = 0;) | |
1030 while (true) { | |
1031 // We must do this until we get a consistent view of the object. | |
187 | 1032 if (FreeChunk::indicatesFreeChunk(p)) { |
1033 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
1034 size_t res = fc->size(); | |
1035 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 1036 assert(res != 0, "Block size should not be 0"); |
1037 assert(loops == 0, "Should be 0"); | |
1038 return res; | |
1039 } | |
1040 } else { | |
187 | 1041 // must read from what 'p' points to in each loop. |
1042 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
518 | 1043 if (k != NULL && |
1044 ((oopDesc*)p)->is_parsable() && |
1045 ((oopDesc*)p)->is_conc_safe()) { |
187 | 1046 assert(k->is_oop(), "Should really be klass oop."); |
1047 oop o = (oop)p; | |
1048 assert(o->is_oop(), "Should be an oop"); | |
1049 size_t res = o->size_given_klass(k->klass_part()); | |
1050 res = adjustObjectSize(res); | |
1051 assert(res != 0, "Block size should not be 0"); | |
1052 return res; | |
1053 } else { | |
1054 return c->block_size_if_printezis_bits(p); | |
1055 } | |
0 | 1056 } |
1057 assert(loops == 0, "Can loop at most once"); | |
1058 DEBUG_ONLY(loops++;) | |
1059 } | |
1060 } | |
1061 | |
1062 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { | |
1063 NOT_PRODUCT(verify_objects_initialized()); | |
1064 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1065 FreeChunk* fc = (FreeChunk*)p; | |
1066 if (fc->isFree()) { | |
1067 return fc->size(); | |
1068 } else { | |
1069 // Ignore mark word because this may be a recently promoted | |
1070 // object whose mark word is used to chain together grey | |
1071 // objects (the last one would have a null value). | |
1072 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1073 return adjustObjectSize(oop(p)->size()); | |
1074 } | |
1075 } | |
1076 | |
1077 // This implementation assumes that the property of "being an object" is | |
1078 // stable. But being a free chunk may not be (because of parallel | |
1079 // promotion.) | |
1080 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { | |
1081 FreeChunk* fc = (FreeChunk*)p; | |
1082 assert(is_in_reserved(p), "Should be in space"); | |
1083 // When doing a mark-sweep-compact of the CMS generation, this | |
1084 // assertion may fail because prepare_for_compaction() uses | |
1085 // space that is garbage to maintain information on ranges of | |
1086 // live objects so that these live ranges can be moved as a whole. | |
1087 // Comment out this assertion until that problem can be solved | |
1088 // (i.e., that the block start calculation may look at objects | |
1089 // at addresses below "p" in finding the object that contains "p", |
1090 // and those objects (if garbage) may have been modified to hold |
1091 // live range information). |
1833 | 1092 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p, |
1093 // "Should be a block boundary"); |
187 | 1094 if (FreeChunk::indicatesFreeChunk(p)) return false; |
1095 klassOop k = oop(p)->klass_or_null(); | |
0 | 1096 if (k != NULL) { |
1097 // Ignore mark word because it may have been used to | |
1098 // chain together promoted objects (the last one | |
1099 // would have a null value). | |
1100 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1101 return true; | |
1102 } else { | |
1103 return false; // Was not an object at the start of collection. | |
1104 } | |
1105 } | |
1106 | |
1107 // Check if the object is alive. This fact is checked either by consulting | |
1108 // the main marking bitmap in the sweeping phase or, if it's a permanent | |
1109 // generation and we're not in the sweeping phase, by checking the | |
1110 // perm_gen_verify_bit_map where we store the "deadness" information if | |
1111 // we did not sweep the perm gen in the most recent previous GC cycle. | |
1112 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { | |
1959 | 1113 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), |
1114 "Else races are possible"); |
1951 | 1115 assert(block_is_obj(p), "The address should point to an object"); |
0 | 1116 |
1117 // If we're sweeping, we use object liveness information from the main bit map | |
1118 // for both perm gen and old gen. | |
1119 // We don't need to lock the bitmap (live_map or dead_map below), because | |
1120 // EITHER we are in the middle of the sweeping phase, and the | |
1121 // main marking bit map (live_map below) is locked, | |
1122 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) | |
1123 // is stable, because it's mutated only in the sweeping phase. | |
1951 | 1124 // NOTE: This method is also used by jmap where, if class unloading is |
1125 // off, the results can return "false" for legitimate perm objects, |
1126 // when we are not in the midst of a sweeping phase, which can result |
1127 // in jmap not reporting certain perm gen objects. This will be moot |
1128 // if/when the perm gen goes away in the future. |
0 | 1129 if (_collector->abstract_state() == CMSCollector::Sweeping) { |
1130 CMSBitMap* live_map = _collector->markBitMap(); | |
1951 | 1131 return live_map->par_isMarked((HeapWord*) p); |
0 | 1132 } else { |
1133 // If we're not currently sweeping and we haven't swept the perm gen in | |
1134 // the previous concurrent cycle then we may have dead but unswept objects | |
1135 // in the perm gen. In this case, we use the "deadness" information | |
1136 // that we had saved in perm_gen_verify_bit_map at the last sweep. | |
1137 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { | |
1138 if (_collector->verifying()) { | |
1139 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); | |
1140 // Object is marked in the dead_map bitmap at the previous sweep | |
1141 // when we know that it's dead; if the bitmap is not allocated then | |
1142 // the object is alive. | |
1143 return (dead_map->sizeInBits() == 0) // bit_map has been allocated | |
1144 || !dead_map->par_isMarked((HeapWord*) p); | |
1145 } else { | |
1146 return false; // We can't say for sure if it's live, so we say that it's dead. | |
1147 } | |
1148 } | |
1149 } | |
1150 return true; | |
1151 } | |
1152 | |
1153 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { | |
1154 FreeChunk* fc = (FreeChunk*)p; | |
1155 assert(is_in_reserved(p), "Should be in space"); | |
1156 assert(_bt.block_start(p) == p, "Should be a block boundary"); | |
1157 if (!fc->isFree()) { | |
1158 // Ignore mark word because it may have been used to | |
1159 // chain together promoted objects (the last one | |
1160 // would have a null value). | |
1161 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1162 return true; | |
1163 } | |
1164 return false; | |
1165 } | |
1166 | |
1167 // "MT-safe but not guaranteed MT-precise" (TM); you may get an | |
1168 // approximate answer if you don't hold the freelistlock when you call this. | |
1169 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { | |
1170 size_t size = 0; | |
1171 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
1172 debug_only( | |
1173 // We may be calling here without the lock in which case we | |
1174 // won't do this modest sanity check. | |
1175 if (freelistLock()->owned_by_self()) { | |
1176 size_t total_list_size = 0; | |
1177 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
1178 fc = fc->next()) { | |
1179 total_list_size += i; | |
1180 } | |
1181 assert(total_list_size == i * _indexedFreeList[i].count(), | |
1182 "Count in list is incorrect"); | |
1183 } | |
1184 ) | |
1185 size += i * _indexedFreeList[i].count(); | |
1186 } | |
1187 return size; | |
1188 } | |
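// Worked example of the accounting above (hypothetical counts): list i holds
// only chunks of exactly i words, so the total is the sum of i * count(i).
// If _indexedFreeList[4] held 3 chunks and _indexedFreeList[8] held 2, this
// method would report 4*3 + 8*2 = 28 words; as noted above, the figure is
// only approximate when the free list lock is not held.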
1189 | |
1190 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { | |
1191 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
1192 return allocate(size); | |
1193 } | |
1194 | |
1195 HeapWord* | |
1196 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { | |
1197 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); | |
1198 } | |
1199 | |
1200 HeapWord* CompactibleFreeListSpace::allocate(size_t size) { | |
1201 assert_lock_strong(freelistLock()); | |
1202 HeapWord* res = NULL; | |
1203 assert(size == adjustObjectSize(size), | |
1204 "use adjustObjectSize() before calling into allocate()"); | |
1205 | |
1206 if (_adaptive_freelists) { | |
1207 res = allocate_adaptive_freelists(size); | |
1208 } else { // non-adaptive free lists | |
1209 res = allocate_non_adaptive_freelists(size); | |
1210 } | |
1211 | |
1212 if (res != NULL) { | |
1213 // check that res does lie in this space! | |
1214 assert(is_in_reserved(res), "Not in this space!"); | |
1215 assert(is_aligned((void*)res), "alignment check"); | |
1216 | |
1217 FreeChunk* fc = (FreeChunk*)res; | |
1218 fc->markNotFree(); | |
1219 assert(!fc->isFree(), "shouldn't be marked free"); | |
187 | 1220 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 1221 // Verify that the block offset table shows this to |
1222 // be a single block, but not one which is unallocated. | |
1223 _bt.verify_single_block(res, size); | |
1224 _bt.verify_not_unallocated(res, size); | |
1225 // mangle a just allocated object with a distinct pattern. | |
1226 debug_only(fc->mangleAllocated(size)); | |
1227 } | |
1228 | |
1229 return res; | |
1230 } | |
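// A sketch of the expected calling convention (illustrative only, mirroring
// par_allocate() above and the asserts in allocate()): the caller holds the
// free list lock and rounds the request with adjustObjectSize() first.
//
//   MutexLockerEx x(space->freelistLock(), Mutex::_no_safepoint_check_flag);
//   HeapWord* mem = space->allocate(space->adjustObjectSize(word_size));
//   if (mem == NULL) {
//     // no space in this generation; callers typically fall back to
//     // expansion or trigger a collection
//   }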
1231 | |
1232 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { | |
1233 HeapWord* res = NULL; | |
1234 // try and use linear allocation for smaller blocks | |
1235 if (size < _smallLinearAllocBlock._allocation_size_limit) { | |
1236 // if successful, the following also adjusts block offset table | |
1237 res = getChunkFromSmallLinearAllocBlock(size); | |
1238 } | |
1239 // Else triage to indexed lists for smaller sizes | |
1240 if (res == NULL) { | |
1241 if (size < SmallForDictionary) { | |
1242 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1243 } else { | |
1244 // else get it from the big dictionary; if even this doesn't | |
1245 // work we are out of luck. | |
1246 res = (HeapWord*)getChunkFromDictionaryExact(size); | |
1247 } | |
1248 } | |
1249 | |
1250 return res; | |
1251 } | |
1252 | |
1253 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { | |
1254 assert_lock_strong(freelistLock()); | |
1255 HeapWord* res = NULL; | |
1256 assert(size == adjustObjectSize(size), | |
1257 "use adjustObjectSize() before calling into allocate()"); | |
1258 | |
1259 // Strategy | |
1260 // if small | |
1261 // exact size from small object indexed list if small | |
1262 // small or large linear allocation block (linAB) as appropriate | |
1263 // take from lists of greater sized chunks | |
1264 // else | |
1265 // dictionary | |
1266 // small or large linear allocation block if it has the space | |
1267 // Try allocating exact size from indexTable first | |
1268 if (size < IndexSetSize) { | |
1269 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1270 if(res != NULL) { | |
1271 assert(res != (HeapWord*)_indexedFreeList[size].head(), | |
1272 "Not removed from free list"); | |
1273 // no block offset table adjustment is necessary on blocks in | |
1274 // the indexed lists. | |
1275 | |
1276 // Try allocating from the small LinAB | |
1277 } else if (size < _smallLinearAllocBlock._allocation_size_limit && | |
1278 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { | |
1279 // if successful, the above also adjusts block offset table | |
1280 // Note that this call will refill the LinAB to | |
1281 // satisfy the request. This is different than |
1282 // evm. | |
1283 // Don't record chunk off a LinAB? smallSplitBirth(size); | |
1284 } else { | |
1285 // Raid the exact free lists larger than size, even if they are not | |
1286 // overpopulated. | |
1287 res = (HeapWord*) getChunkFromGreater(size); | |
1288 } | |
1289 } else { | |
1290 // Big objects get allocated directly from the dictionary. | |
1291 res = (HeapWord*) getChunkFromDictionaryExact(size); | |
1292 if (res == NULL) { | |
1293 // Try hard not to fail since an allocation failure will likely | |
1294 // trigger a synchronous GC. Try to get the space from the | |
1295 // allocation blocks. | |
1296 res = getChunkFromSmallLinearAllocBlockRemainder(size); | |
1297 } | |
1298 } | |
1299 | |
1300 return res; | |
1301 } | |
1302 | |
1303 // A worst-case estimate of the space required (in HeapWords) to expand the heap | |
1304 // when promoting obj. | |
1305 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { | |
1306 // Depending on the object size, expansion may require refilling either a | |
1307 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize | |
1308 // is added because the dictionary may over-allocate to avoid fragmentation. | |
1309 size_t space = obj_size; | |
1310 if (!_adaptive_freelists) { | |
1311 space = MAX2(space, _smallLinearAllocBlock._refillSize); | |
1312 } | |
1313 space += _promoInfo.refillSize() + 2 * MinChunkSize; | |
1314 return space; | |
1315 } | |
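// Worked example of the estimate above, with hypothetical values: for an
// adaptive-freelists space promoting an object of obj_size = 100 words, with
// a PromotionInfo refill of 512 words and MinChunkSize = 4 words, the result
// is 100 + 512 + 2*4 = 620 words; in the non-adaptive case obj_size is first
// raised to the small linAB refill size if that is larger.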
1316 | |
1317 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { | |
1318 FreeChunk* ret; | |
1319 | |
1320 assert(numWords >= MinChunkSize, "Size is less than minimum"); | |
1321 assert(linearAllocationWouldFail() || bestFitFirst(), | |
1322 "Should not be here"); | |
1323 | |
1324 size_t i; | |
1325 size_t currSize = numWords + MinChunkSize; | |
1326 assert(currSize % MinObjAlignment == 0, "currSize should be aligned"); | |
1327 for (i = currSize; i < IndexSetSize; i += IndexSetStride) { | |
1328 FreeList* fl = &_indexedFreeList[i]; | |
1329 if (fl->head()) { | |
1330 ret = getFromListGreater(fl, numWords); | |
1331 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1332 return ret; | |
1333 } | |
1334 } | |
1335 | |
1336 currSize = MAX2((size_t)SmallForDictionary, | |
1337 (size_t)(numWords + MinChunkSize)); | |
1338 | |
1339 /* Try to get a chunk that satisfies request, while avoiding | |
1340 fragmentation that can't be handled. */ | |
1341 { | |
1342 ret = dictionary()->getChunk(currSize); | |
1343 if (ret != NULL) { | |
1344 assert(ret->size() - numWords >= MinChunkSize, | |
1345 "Chunk is too small"); | |
1346 _bt.allocated((HeapWord*)ret, ret->size()); | |
1347 /* Carve returned chunk. */ | |
1348 (void) splitChunkAndReturnRemainder(ret, numWords); | |
1349 /* Label this as no longer a free chunk. */ | |
1350 assert(ret->isFree(), "This chunk should be free"); | |
1351 ret->linkPrev(NULL); | |
1352 } | |
1353 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1354 return ret; | |
1355 } | |
1356 ShouldNotReachHere(); | |
1357 } | |
1358 | |
1359 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) | |
1360 const { | |
1361 assert(fc->size() < IndexSetSize, "Size of chunk is too large"); | |
1362 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); | |
1363 } | |
1364 | |
1365 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { | |
1366 if (fc->size() >= IndexSetSize) { | |
1367 return dictionary()->verifyChunkInFreeLists(fc); | |
1368 } else { | |
1369 return verifyChunkInIndexedFreeLists(fc); | |
1370 } | |
1371 } | |
1372 | |
1373 #ifndef PRODUCT | |
1374 void CompactibleFreeListSpace::assert_locked() const { | |
1375 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); | |
1376 } | |
1145 | 1377 |
1378 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const { |
1379 CMSLockVerifier::assert_locked(lock); |
1380 } |
0 | 1381 #endif |
1382 | |
1383 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { | |
1384 // In the parallel case, the main thread holds the free list lock | |
1385 // on behalf the parallel threads. | |
1386 FreeChunk* fc; | |
1387 { | |
1388 // If GC is parallel, this might be called by several threads. | |
1389 // This should be rare enough that the locking overhead won't affect | |
1390 // the sequential code. | |
1391 MutexLockerEx x(parDictionaryAllocLock(), | |
1392 Mutex::_no_safepoint_check_flag); | |
1393 fc = getChunkFromDictionary(size); | |
1394 } | |
1395 if (fc != NULL) { | |
1396 fc->dontCoalesce(); | |
1397 assert(fc->isFree(), "Should be free, but not coalescable"); | |
1398 // Verify that the block offset table shows this to | |
1399 // be a single block, but not one which is unallocated. | |
1400 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1401 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
1402 } | |
1403 return fc; | |
1404 } | |
1405 | |
113 | 1406 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { |
0 | 1407 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1408 assert_locked(); | |
1409 | |
1410 // if we are tracking promotions, then first ensure space for | |
1411 // promotion (including spooling space for saving header if necessary). | |
1412 // then allocate and copy, then track promoted info if needed. | |
1413 // When tracking (see PromotionInfo::track()), the mark word may | |
1414 // be displaced and in this case restoration of the mark word | |
1415 // occurs in the (oop_since_save_marks_)iterate phase. | |
1416 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { | |
1417 return NULL; | |
1418 } | |
1419 // Call the allocate(size_t, bool) form directly to avoid the | |
1420 // additional call through the allocate(size_t) form. Having | |
1421 // the compile inline the call is problematic because allocate(size_t) | |
1422 // is a virtual method. | |
1423 HeapWord* res = allocate(adjustObjectSize(obj_size)); | |
1424 if (res != NULL) { | |
1425 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); | |
1426 // if we should be tracking promotions, do so. | |
1427 if (_promoInfo.tracking()) { | |
1428 _promoInfo.track((PromotedObject*)res); | |
1429 } | |
1430 } | |
1431 return oop(res); | |
1432 } | |
1433 | |
1434 HeapWord* | |
1435 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { | |
1436 assert_locked(); | |
1437 assert(size >= MinChunkSize, "minimum chunk size"); | |
1438 assert(size < _smallLinearAllocBlock._allocation_size_limit, | |
1439 "maximum from smallLinearAllocBlock"); | |
1440 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); | |
1441 } | |
1442 | |
1443 HeapWord* | |
1444 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, | |
1445 size_t size) { | |
1446 assert_locked(); | |
1447 assert(size >= MinChunkSize, "too small"); | |
1448 HeapWord* res = NULL; | |
1449 // Try to do linear allocation from blk, making sure that it is not exhausted. |
1450 if (blk->_word_size == 0) { | |
1451 // We have probably been unable to fill this either in the prologue or | |
1452 // when it was exhausted at the last linear allocation. Bail out until | |
1453 // next time. | |
1454 assert(blk->_ptr == NULL, "consistency check"); | |
1455 return NULL; | |
1456 } | |
1457 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); | |
1458 res = getChunkFromLinearAllocBlockRemainder(blk, size); | |
1459 if (res != NULL) return res; | |
1460 | |
1461 // about to exhaust this linear allocation block | |
1462 if (blk->_word_size == size) { // exactly satisfied | |
1463 res = blk->_ptr; | |
1464 _bt.allocated(res, blk->_word_size); | |
1465 } else if (size + MinChunkSize <= blk->_refillSize) { | |
1145 | 1466 size_t sz = blk->_word_size; |
0 | 1467 // Update _unallocated_block if the size is such that chunk would be |
1468 // returned to the indexed free list. All other chunks in the indexed | |
1469 // free lists are allocated from the dictionary so that _unallocated_block | |
1470 // has already been adjusted for them. Do it here so that the cost is |
1471 // incurred for all chunks added back to the indexed free lists. |
1145 | 1472 if (sz < SmallForDictionary) { |
1473 _bt.allocated(blk->_ptr, sz); |
0 | 1474 } |
1475 // Return the chunk that isn't big enough, and then refill below. | |
1145 | 1476 addChunkToFreeLists(blk->_ptr, sz); |
1477 splitBirth(sz); |
0 | 1478 // Don't keep statistics on adding back chunk from a LinAB. |
1479 } else { | |
1480 // A refilled block would not satisfy the request. | |
1481 return NULL; | |
1482 } | |
1483 | |
1484 blk->_ptr = NULL; blk->_word_size = 0; | |
1485 refillLinearAllocBlock(blk); | |
1486 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, | |
1487 "block was replenished"); | |
1488 if (res != NULL) { | |
1489 splitBirth(size); | |
1490 repairLinearAllocBlock(blk); | |
1491 } else if (blk->_ptr != NULL) { | |
1492 res = blk->_ptr; | |
1493 size_t blk_size = blk->_word_size; | |
1494 blk->_word_size -= size; | |
1495 blk->_ptr += size; | |
1496 splitBirth(size); | |
1497 repairLinearAllocBlock(blk); | |
1498 // Update BOT last so that other (parallel) GC threads see a consistent | |
1499 // view of the BOT and free blocks. | |
1500 // Above must occur before BOT is updated below. | |
1716 | 1501 OrderAccess::storestore(); |
0 | 1502 _bt.split_block(res, blk_size, size); // adjust block offset table |
1503 } | |
1504 return res; | |
1505 } | |
1506 | |
1507 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( | |
1508 LinearAllocBlock* blk, | |
1509 size_t size) { | |
1510 assert_locked(); | |
1511 assert(size >= MinChunkSize, "too small"); | |
1512 | |
1513 HeapWord* res = NULL; | |
1514 // This is the common case. Keep it simple. | |
1515 if (blk->_word_size >= size + MinChunkSize) { | |
1516 assert(blk->_ptr != NULL, "consistency check"); | |
1517 res = blk->_ptr; | |
1518 // Note that the BOT is up-to-date for the linAB before allocation. It | |
1519 // indicates the start of the linAB. The split_block() updates the | |
1520 // BOT for the linAB after the allocation (indicates the start of the | |
1521 // next chunk to be allocated). | |
1522 size_t blk_size = blk->_word_size; | |
1523 blk->_word_size -= size; | |
1524 blk->_ptr += size; | |
1525 splitBirth(size); | |
1526 repairLinearAllocBlock(blk); | |
1527 // Update BOT last so that other (parallel) GC threads see a consistent | |
1528 // view of the BOT and free blocks. | |
1529 // Above must occur before BOT is updated below. | |
1716 | 1530 OrderAccess::storestore(); |
0 | 1531 _bt.split_block(res, blk_size, size); // adjust block offset table |
1532 _bt.allocated(res, size); | |
1533 } | |
1534 return res; | |
1535 } | |
1536 | |
1537 FreeChunk* | |
1538 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { | |
1539 assert_locked(); | |
1540 assert(size < SmallForDictionary, "just checking"); | |
1541 FreeChunk* res; | |
1542 res = _indexedFreeList[size].getChunkAtHead(); | |
1543 if (res == NULL) { | |
1544 res = getChunkFromIndexedFreeListHelper(size); | |
1545 } | |
1546 _bt.verify_not_unallocated((HeapWord*) res, size); | |
1145 | 1547 assert(res == NULL || res->size() == size, "Incorrect block size"); |
0 | 1548 return res; |
1549 } | |
1550 | |
1551 FreeChunk* | |
1145 | 1552 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size, |
1553 bool replenish) { |
0 | 1554 assert_locked(); |
1555 FreeChunk* fc = NULL; | |
1556 if (size < SmallForDictionary) { | |
1557 assert(_indexedFreeList[size].head() == NULL || | |
1558 _indexedFreeList[size].surplus() <= 0, | |
1559 "List for this size should be empty or under populated"); | |
1560 // Try best fit in exact lists before replenishing the list | |
1561 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { | |
1562 // Replenish list. | |
1563 // | |
1564 // Things tried that failed. | |
1565 // Tried allocating out of the two LinAB's first before | |
1566 // replenishing lists. | |
1567 // Tried small linAB of size 256 (size in indexed list) | |
1568 // and replenishing indexed lists from the small linAB. | |
1569 // | |
1570 FreeChunk* newFc = NULL; | |
1145 | 1571 const size_t replenish_size = CMSIndexedFreeListReplenish * size; |
0 | 1572 if (replenish_size < SmallForDictionary) { |
1573 // Do not replenish from an underpopulated size. | |
1574 if (_indexedFreeList[replenish_size].surplus() > 0 && | |
1575 _indexedFreeList[replenish_size].head() != NULL) { | |
1145 | 1576 newFc = _indexedFreeList[replenish_size].getChunkAtHead(); |
1577 } else if (bestFitFirst()) { |
0 | 1578 newFc = bestFitSmall(replenish_size); |
1579 } | |
1580 } | |
1145 | 1581 if (newFc == NULL && replenish_size > size) { |
0 | 1582 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); |
1145 | 1583 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false); |
0 | 1584 } |
1145 | 1585 // Note: The stats update re the split-death of the block obtained above |
1586 // is recorded below, precisely when we know we are actually going |
1587 // to split it into more than one piece. |
0 | 1588 if (newFc != NULL) { |
1145 | 1589 if (replenish || CMSReplenishIntermediate) { |
1590 // Replenish this list and return one block to caller. |
1591 size_t i; |
1592 FreeChunk *curFc, *nextFc; |
1593 size_t num_blk = newFc->size() / size; |
1594 assert(num_blk >= 1, "Smaller than requested?"); |
1595 assert(newFc->size() % size == 0, "Should be integral multiple of request"); |
1596 if (num_blk > 1) { |
1597 // we are sure we will be splitting the block just obtained |
1598 // into multiple pieces; record the split-death of the original |
1599 splitDeath(replenish_size); |
1600 } |
1601 // carve up and link blocks 0, ..., num_blk - 2 |
1602 // The last chunk is not added to the lists but is returned as the |
1603 // free chunk. |
1604 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), |
1605 i = 0; |
1606 i < (num_blk - 1); |
1607 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), |
1608 i++) { |
1609 curFc->setSize(size); |
1610 // Don't record this as a return in order to try and |
1611 // determine the "returns" from a GC. |
1612 _bt.verify_not_unallocated((HeapWord*) fc, size); |
1613 _indexedFreeList[size].returnChunkAtTail(curFc, false); |
1614 _bt.mark_block((HeapWord*)curFc, size); |
1615 splitBirth(size); |
1616 // Don't record the initial population of the indexed list |
1617 // as a split birth. |
1618 } |
1619 |
1620 // check that the arithmetic was OK above |
1621 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size, |
1622 "inconsistency in carving newFc"); |
0 | 1623 curFc->setSize(size); |
1624 _bt.mark_block((HeapWord*)curFc, size); | |
1625 splitBirth(size); | |
1145 | 1626 fc = curFc; |
1627 } else { |
1628 // Return entire block to caller |
1629 fc = newFc; |
0 | 1630 } |
1631 } | |
1632 } | |
1633 } else { | |
1634 // Get a free chunk from the free chunk dictionary to be returned to | |
1635 // replenish the indexed free list. | |
1636 fc = getChunkFromDictionaryExact(size); | |
1637 } | |
1145 | 1638 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk"); |
0 | 1639 return fc; |
1640 } | |
1641 | |
1642 FreeChunk* | |
1643 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { | |
1644 assert_locked(); | |
1645 FreeChunk* fc = _dictionary->getChunk(size); | |
1646 if (fc == NULL) { | |
1647 return NULL; | |
1648 } | |
1649 _bt.allocated((HeapWord*)fc, fc->size()); | |
1650 if (fc->size() >= size + MinChunkSize) { | |
1651 fc = splitChunkAndReturnRemainder(fc, size); | |
1652 } | |
1653 assert(fc->size() >= size, "chunk too small"); | |
1654 assert(fc->size() < size + MinChunkSize, "chunk too big"); | |
1655 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1656 return fc; | |
1657 } | |
1658 | |
1659 FreeChunk* | |
1660 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { | |
1661 assert_locked(); | |
1662 FreeChunk* fc = _dictionary->getChunk(size); | |
1663 if (fc == NULL) { | |
1664 return fc; | |
1665 } | |
1666 _bt.allocated((HeapWord*)fc, fc->size()); | |
1667 if (fc->size() == size) { | |
1668 _bt.verify_single_block((HeapWord*)fc, size); | |
1669 return fc; | |
1670 } | |
1671 assert(fc->size() > size, "getChunk() guarantee"); | |
1672 if (fc->size() < size + MinChunkSize) { | |
1673 // Return the chunk to the dictionary and go get a bigger one. | |
1674 returnChunkToDictionary(fc); | |
1675 fc = _dictionary->getChunk(size + MinChunkSize); | |
1676 if (fc == NULL) { | |
1677 return NULL; | |
1678 } | |
1679 _bt.allocated((HeapWord*)fc, fc->size()); | |
1680 } | |
1681 assert(fc->size() >= size + MinChunkSize, "tautology"); | |
1682 fc = splitChunkAndReturnRemainder(fc, size); | |
1683 assert(fc->size() == size, "chunk is wrong size"); | |
1684 _bt.verify_single_block((HeapWord*)fc, size); | |
1685 return fc; | |
1686 } | |
1687 | |
1688 void | |
1689 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { | |
1690 assert_locked(); | |
1691 | |
1692 size_t size = chunk->size(); | |
1693 _bt.verify_single_block((HeapWord*)chunk, size); | |
1694 // adjust _unallocated_block downward, as necessary | |
1695 _bt.freed((HeapWord*)chunk, size); | |
1696 _dictionary->returnChunk(chunk); | |
1145 | 1697 #ifndef PRODUCT |
1698 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1699 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats(); |
1700 } |
1701 #endif // PRODUCT |
0 | 1702 } |
1703 | |
1704 void | |
1705 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { | |
1706 assert_locked(); | |
1707 size_t size = fc->size(); | |
1708 _bt.verify_single_block((HeapWord*) fc, size); | |
1709 _bt.verify_not_unallocated((HeapWord*) fc, size); | |
1710 if (_adaptive_freelists) { | |
1711 _indexedFreeList[size].returnChunkAtTail(fc); | |
1712 } else { | |
1713 _indexedFreeList[size].returnChunkAtHead(fc); | |
1714 } | |
1145 | 1715 #ifndef PRODUCT |
1716 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
1717 _indexedFreeList[size].verify_stats(); |
1718 } |
1719 #endif // PRODUCT |
0 | 1720 } |
1721 | |
1722 // Add chunk to end of last block -- if it's the largest | |
1723 // block -- and update BOT and census data. We would | |
1724 // of course have preferred to coalesce it with the | |
1725 // last block, but it's currently less expensive to find the | |
1726 // largest block than it is to find the last. | |
1727 void | |
1728 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( | |
1729 HeapWord* chunk, size_t size) { | |
1730 // check that the chunk does lie in this space! | |
1731 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1732 // One of the parallel gc task threads may be here | |
1733 // whilst others are allocating. | |
1734 Mutex* lock = NULL; | |
1735 if (ParallelGCThreads != 0) { | |
1736 lock = &_parDictionaryAllocLock; | |
1737 } | |
1738 FreeChunk* ec; | |
1739 { | |
1740 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1741 ec = dictionary()->findLargestDict(); // get largest block | |
1742 if (ec != NULL && ec->end() == chunk) { | |
1743 // It's a coterminal block - we can coalesce. | |
1744 size_t old_size = ec->size(); | |
1745 coalDeath(old_size); | |
1746 removeChunkFromDictionary(ec); | |
1747 size += old_size; | |
1748 } else { | |
1749 ec = (FreeChunk*)chunk; | |
1750 } | |
1751 } | |
1752 ec->setSize(size); | |
1753 debug_only(ec->mangleFreed(size)); | |
1754 if (size < SmallForDictionary) { | |
1755 lock = _indexedFreeListParLocks[size]; | |
1756 } | |
1757 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1758 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); | |
1759 // record the birth under the lock since the recording involves | |
1760 // manipulation of the list on which the chunk lives and | |
1761 // if the chunk is allocated and is the last on the list, | |
1762 // the list can go away. | |
1763 coalBirth(size); | |
1764 } | |
1765 | |
1766 void | |
1767 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, | |
1768 size_t size) { | |
1769 // check that the chunk does lie in this space! | |
1770 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1771 assert_locked(); | |
1772 _bt.verify_single_block(chunk, size); | |
1773 | |
1774 FreeChunk* fc = (FreeChunk*) chunk; | |
1775 fc->setSize(size); | |
1776 debug_only(fc->mangleFreed(size)); | |
1777 if (size < SmallForDictionary) { | |
1778 returnChunkToFreeList(fc); | |
1779 } else { | |
1780 returnChunkToDictionary(fc); | |
1781 } | |
1782 } | |
1783 | |
1784 void | |
1785 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, | |
1786 size_t size, bool coalesced) { | |
1787 assert_locked(); | |
1788 assert(chunk != NULL, "null chunk"); | |
1789 if (coalesced) { | |
1790 // repair BOT | |
1791 _bt.single_block(chunk, size); | |
1792 } | |
1793 addChunkToFreeLists(chunk, size); | |
1794 } | |
1795 | |
1796 // We _must_ find the purported chunk on our free lists; | |
1797 // we assert if we don't. | |
1798 void | |
1799 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { | |
1800 size_t size = fc->size(); | |
1801 assert_locked(); | |
1802 debug_only(verifyFreeLists()); | |
1803 if (size < SmallForDictionary) { | |
1804 removeChunkFromIndexedFreeList(fc); | |
1805 } else { | |
1806 removeChunkFromDictionary(fc); | |
1807 } | |
1808 _bt.verify_single_block((HeapWord*)fc, size); | |
1809 debug_only(verifyFreeLists()); | |
1810 } | |
1811 | |
1812 void | |
1813 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { | |
1814 size_t size = fc->size(); | |
1815 assert_locked(); | |
1816 assert(fc != NULL, "null chunk"); | |
1817 _bt.verify_single_block((HeapWord*)fc, size); | |
1818 _dictionary->removeChunk(fc); | |
1819 // adjust _unallocated_block upward, as necessary | |
1820 _bt.allocated((HeapWord*)fc, size); | |
1821 } | |
1822 | |
1823 void | |
1824 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { | |
1825 assert_locked(); | |
1826 size_t size = fc->size(); | |
1827 _bt.verify_single_block((HeapWord*)fc, size); | |
1828 NOT_PRODUCT( | |
1829 if (FLSVerifyIndexTable) { | |
1830 verifyIndexedFreeList(size); | |
1831 } | |
1832 ) | |
1833 _indexedFreeList[size].removeChunk(fc); | |
1834 debug_only(fc->clearNext()); | |
1835 debug_only(fc->clearPrev()); | |
1836 NOT_PRODUCT( | |
1837 if (FLSVerifyIndexTable) { | |
1838 verifyIndexedFreeList(size); | |
1839 } | |
1840 ) | |
1841 } | |
1842 | |
1843 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { | |
1844 /* A hint is the next larger size that has a surplus. | |
1845 Start search at a size large enough to guarantee that | |
1846 the excess is >= MIN_CHUNK. */ | |
1847 size_t start = align_object_size(numWords + MinChunkSize); | |
1848 if (start < IndexSetSize) { | |
1849 FreeList* it = _indexedFreeList; | |
1850 size_t hint = _indexedFreeList[start].hint(); | |
1851 while (hint < IndexSetSize) { | |
1852 assert(hint % MinObjAlignment == 0, "hint should be aligned"); | |
1853 FreeList *fl = &_indexedFreeList[hint]; | |
1854 if (fl->surplus() > 0 && fl->head() != NULL) { | |
1855 // Found a list with surplus, reset original hint | |
1856 // and split out a free chunk which is returned. | |
1857 _indexedFreeList[start].set_hint(hint); | |
1858 FreeChunk* res = getFromListGreater(fl, numWords); | |
1859 assert(res == NULL || res->isFree(), | |
1860 "Should be returning a free chunk"); | |
1861 return res; | |
1862 } | |
1863 hint = fl->hint(); /* keep looking */ | |
1864 } | |
1865 /* None found. */ | |
1866 it[start].set_hint(IndexSetSize); | |
1867 } | |
1868 return NULL; | |
1869 } | |
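The search above follows per-size "hints": each indexed free list remembers the next larger size that last had a surplus, so a best-fit probe can skip empty lists. A minimal standalone sketch of that walk, with a toy hint/surplus table (the sizes and values here are illustrative only, not taken from the VM):

#include <cstddef>
#include <cstdio>

// Toy model: hint[s] is the next larger size worth trying; surplus[s] > 0
// means list s currently has spare chunks. Values are made up for illustration.
const size_t IndexSetSize = 16;

size_t find_via_hints(const size_t* hint, const int* surplus, size_t start) {
  size_t h = hint[start];
  while (h < IndexSetSize) {
    if (surplus[h] > 0) return h;   // found a list with spare chunks
    h = hint[h];                    // keep following the chain
  }
  return IndexSetSize;              // nothing found
}

int main() {
  size_t hint[IndexSetSize];
  int surplus[IndexSetSize] = {0};
  for (size_t i = 0; i < IndexSetSize; i++) hint[i] = i + 2;  // toy chain
  surplus[12] = 3;                  // pretend size 12 has a surplus
  printf("start 6 -> size %zu\n", find_via_hints(hint, surplus, 6));
  return 0;
}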
1870 | |
1871 /* Requires fl->size >= numWords + MinChunkSize */ | |
1872 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, | |
1873 size_t numWords) { | |
1874 FreeChunk *curr = fl->head(); | |
1875 size_t oldNumWords = curr->size(); | |
1876 assert(numWords >= MinChunkSize, "Word size is too small"); | |
1877 assert(curr != NULL, "List is empty"); | |
1878 assert(oldNumWords >= numWords + MinChunkSize, | |
1879 "Size of chunks in the list is too small"); | |
1880 | |
1881 fl->removeChunk(curr); | |
1882 // recorded indirectly by splitChunkAndReturnRemainder - | |
1883 // smallSplit(oldNumWords, numWords); | |
1884 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); | |
1885 // Does anything have to be done for the remainder in terms of | |
1886 // fixing the card table? | |
1887 assert(new_chunk == NULL || new_chunk->isFree(), | |
1888 "Should be returning a free chunk"); | |
1889 return new_chunk; | |
1890 } | |
1891 | |
1892 FreeChunk* | |
1893 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, | |
1894 size_t new_size) { | |
1895 assert_locked(); | |
1896 size_t size = chunk->size(); | |
1897 assert(size > new_size, "Split from a smaller block?"); | |
1898 assert(is_aligned(chunk), "alignment problem"); | |
1899 assert(size == adjustObjectSize(size), "alignment problem"); | |
1900 size_t rem_size = size - new_size; | |
1901 assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); | |
1902 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); | |
1903 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); | |
1904 assert(is_aligned(ffc), "alignment problem"); | |
1905 ffc->setSize(rem_size); | |
1906 ffc->linkNext(NULL); | |
1907 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. | |
1908 // Above must occur before BOT is updated below. | |
1909 // adjust block offset table | |
1910 OrderAccess::storestore(); | |
1911 assert(chunk->isFree() && ffc->isFree(), "Error"); | |
0 | 1912 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); |
1913 if (rem_size < SmallForDictionary) { | |
1914 bool is_par = (SharedHeap::heap()->n_par_threads() > 0); | |
1915 if (is_par) _indexedFreeListParLocks[rem_size]->lock(); | |
1916 returnChunkToFreeList(ffc); | |
1917 split(size, rem_size); | |
1918 if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); | |
1919 } else { | |
1920 returnChunkToDictionary(ffc); | |
1921 split(size, rem_size); | |
1922 } | |
1923 chunk->setSize(new_size); | |
1924 return chunk; | |
1925 } | |
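The arithmetic above carves the remainder off the tail of the original chunk: the caller keeps the first new_size words, and the remainder becomes a new free chunk new_size words in. A toy worked example (the MinChunkSize value and word counts below are made up for illustration):

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t MinChunkSize = 4;       // assumed minimum, for illustration only
  size_t size = 100, new_size = 40;
  size_t rem_size = size - new_size;   // 60-word remainder
  assert(rem_size >= MinChunkSize);
  // In the code above, the remainder chunk starts new_size words past chunk:
  //   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  printf("keep %zu words, remainder of %zu words at offset %zu\n",
         new_size, rem_size, new_size);
  return 0;
}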
1926 | |
1927 void | |
1928 CompactibleFreeListSpace::sweep_completed() { | |
1929 // Now that space is probably plentiful, refill linear | |
1930 // allocation blocks as needed. | |
1931 refillLinearAllocBlocksIfNeeded(); | |
1932 } | |
1933 | |
1934 void | |
1935 CompactibleFreeListSpace::gc_prologue() { | |
1936 assert_locked(); | |
1937 if (PrintFLSStatistics != 0) { | |
1938 gclog_or_tty->print("Before GC:\n"); | |
1939 reportFreeListStatistics(); | |
1940 } | |
1941 refillLinearAllocBlocksIfNeeded(); | |
1942 } | |
1943 | |
1944 void | |
1945 CompactibleFreeListSpace::gc_epilogue() { | |
1946 assert_locked(); | |
1947 if (PrintGCDetails && Verbose && !_adaptive_freelists) { | |
1948 if (_smallLinearAllocBlock._word_size == 0) | |
1949 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); | |
1950 } | |
1951 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1952 _promoInfo.stopTrackingPromotions(); | |
1953 repairLinearAllocationBlocks(); | |
1954 // Print Space's stats | |
1955 if (PrintFLSStatistics != 0) { | |
1956 gclog_or_tty->print("After GC:\n"); | |
1957 reportFreeListStatistics(); | |
1958 } | |
1959 } | |
1960 | |
1961 // Iteration support, mostly delegated from a CMS generation | |
1962 | |
1963 void CompactibleFreeListSpace::save_marks() { | |
1964 // mark the "end" of the used space at the time of this call; | |
1965 // note, however, that promoted objects from this point | |
1966 // on are tracked in the _promoInfo below. | |
1967 set_saved_mark_word(unallocated_block()); |
0 | 1968 // inform allocator that promotions should be tracked. |
1969 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1970 _promoInfo.startTrackingPromotions(); | |
1971 } | |
1972 | |
1973 bool CompactibleFreeListSpace::no_allocs_since_save_marks() { | |
1974 assert(_promoInfo.tracking(), "No preceding save_marks?"); | |
1975 assert(SharedHeap::heap()->n_par_threads() == 0, | |
1976 "Shouldn't be called if using parallel gc."); | |
0 | 1977 return _promoInfo.noPromotions(); |
1978 } | |
1979 | |
1980 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
1981 \ | |
1982 void CompactibleFreeListSpace:: \ | |
1983 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ | |
1984 assert(SharedHeap::heap()->n_par_threads() == 0, \ | |
1985 "Shouldn't be called (yet) during parallel part of gc."); \ | |
1986 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ | |
1987 /* \ | |
1988 * This also restores any displaced headers and removes the elements from \ | |
1989 * the iteration set as they are processed, so that we have a clean slate \ | |
1990 * at the end of the iteration. Note, thus, that if new objects are \ | |
1991 * promoted as a result of the iteration they are iterated over as well. \ | |
1992 */ \ | |
1993 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ | |
1994 } | |
1995 | |
1996 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) | |
1997 | |
1998 | |
1999 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2000 // ugghh... how would one do this efficiently for a non-contiguous space? | |
2001 guarantee(false, "NYI"); | |
2002 } | |
2003 | |
2004 bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
0 | 2005 return _smallLinearAllocBlock._word_size == 0; |
2006 } | |
2007 | |
2008 void CompactibleFreeListSpace::repairLinearAllocationBlocks() { | |
2009 // Fix up linear allocation blocks to look like free blocks | |
2010 repairLinearAllocBlock(&_smallLinearAllocBlock); | |
2011 } | |
2012 | |
2013 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { | |
2014 assert_locked(); | |
2015 if (blk->_ptr != NULL) { | |
2016 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, | |
2017 "Minimum block size requirement"); | |
2018 FreeChunk* fc = (FreeChunk*)(blk->_ptr); | |
2019 fc->setSize(blk->_word_size); | |
2020 fc->linkPrev(NULL); // mark as free | |
2021 fc->dontCoalesce(); | |
2022 assert(fc->isFree(), "just marked it free"); | |
2023 assert(fc->cantCoalesce(), "just marked it uncoalescable"); | |
2024 } | |
2025 } | |
2026 | |
2027 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { | |
2028 assert_locked(); | |
2029 if (_smallLinearAllocBlock._ptr == NULL) { | |
2030 assert(_smallLinearAllocBlock._word_size == 0, | |
2031 "Size of linAB should be zero if the ptr is NULL"); | |
2032 // Reset the linAB refill and allocation size limit. | |
2033 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); | |
2034 } | |
2035 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); | |
2036 } | |
2037 | |
2038 void | |
2039 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { | |
2040 assert_locked(); | |
2041 assert((blk->_ptr == NULL && blk->_word_size == 0) || | |
2042 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), | |
2043 "blk invariant"); | |
2044 if (blk->_ptr == NULL) { | |
2045 refillLinearAllocBlock(blk); | |
2046 } | |
2047 if (PrintMiscellaneous && Verbose) { | |
2048 if (blk->_word_size == 0) { | |
2049 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); | |
2050 } | |
2051 } | |
2052 } | |
2053 | |
2054 void | |
2055 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { | |
2056 assert_locked(); | |
2057 assert(blk->_word_size == 0 && blk->_ptr == NULL, | |
2058 "linear allocation block should be empty"); | |
2059 FreeChunk* fc; | |
2060 if (blk->_refillSize < SmallForDictionary && | |
2061 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { | |
2062 // A linAB's strategy might be to use small sizes to reduce | |
2063 // fragmentation but still get the benefits of allocation from a | |
2064 // linAB. | |
2065 } else { | |
2066 fc = getChunkFromDictionary(blk->_refillSize); | |
2067 } | |
2068 if (fc != NULL) { | |
2069 blk->_ptr = (HeapWord*)fc; | |
2070 blk->_word_size = fc->size(); | |
2071 fc->dontCoalesce(); // to prevent sweeper from sweeping us up | |
2072 } | |
2073 } | |
2074 | |
2075 // Support for concurrent collection policy decisions. | |
2076 bool CompactibleFreeListSpace::should_concurrent_collect() const { | |
2077 // In the future we might want to add in fragmentation stats -- | |
2078 // including erosion of the "mountain" into this decision as well. | |
2079 return !adaptive_freelists() && linearAllocationWouldFail(); | |
2080 } | |
2081 | |
0 | 2082 // Support for compaction |
2083 | |
2084 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { | |
2085 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); | |
2086 // prepare_for_compaction() uses the space between live objects | |
2087 // so that the later phase can skip dead space quickly. So verification | |
2088 // of the free lists doesn't work after this point. | |
2089 } | |
2090 | |
2091 #define obj_size(q) adjustObjectSize(oop(q)->size()) | |
2092 #define adjust_obj_size(s) adjustObjectSize(s) | |
2093 | |
2094 void CompactibleFreeListSpace::adjust_pointers() { | |
2095 // In other versions of adjust_pointers(), a bail out | |
2096 // based on the amount of live data in the generation | |
2097 // (i.e., if 0, bail out) may be used. | |
2098 // Cannot test used() == 0 here because the free lists have already | |
2099 // been mangled by the compaction. | |
2100 | |
2101 SCAN_AND_ADJUST_POINTERS(adjust_obj_size); | |
2102 // See note about verification in prepare_for_compaction(). | |
2103 } | |
2104 | |
2105 void CompactibleFreeListSpace::compact() { | |
2106 SCAN_AND_COMPACT(obj_size); | |
2107 } | |
2108 | |
2109 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] | |
2110 // where fbs is free block sizes | |
2111 double CompactibleFreeListSpace::flsFrag() const { | |
2112 size_t itabFree = totalSizeInIndexedFreeLists(); | |
2113 double frag = 0.0; | |
2114 size_t i; | |
2115 | |
2116 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2117 double sz = i; | |
2118 frag += _indexedFreeList[i].count() * (sz * sz); | |
2119 } | |
2120 | |
2121 double totFree = itabFree + | |
2122 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); | |
2123 if (totFree > 0) { | |
2124 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / | |
2125 (totFree * totFree)); | |
2126 frag = (double)1.0 - frag; | |
2127 } else { | |
2128 assert(frag == 0.0, "Follows from totFree == 0"); | |
2129 } | |
2130 return frag; | |
2131 } | |
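As a quick sanity check of the metric above: with all free space in one block, the sum of squared sizes equals the squared total, so frag is 0; the more the same total is split up, the closer frag gets to 1. A small standalone sketch with toy block sizes (not VM data):

#include <cstdio>
#include <vector>

// frag = 1 - (sum of squares of free block sizes) / (total free)^2.
// One 16-word block:   1 - 256/256 = 0.0   (no fragmentation)
// Four 4-word blocks:  1 - 64/256  = 0.75  (badly fragmented)
double fls_frag(const std::vector<double>& blocks) {
  double tot = 0.0, sumsq = 0.0;
  for (double b : blocks) { tot += b; sumsq += b * b; }
  return tot > 0.0 ? 1.0 - sumsq / (tot * tot) : 0.0;
}

int main() {
  printf("one 16-word block : %.2f\n", fls_frag({16.0}));
  printf("four 4-word blocks: %.2f\n", fls_frag({4.0, 4.0, 4.0, 4.0}));
  return 0;
}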
2132 | |
2133 void CompactibleFreeListSpace::beginSweepFLCensus( | |
2134 float inter_sweep_current, | |
2135 float inter_sweep_estimate, | |
2136 float intra_sweep_estimate) { | |
0 | 2137 assert_locked(); |
2138 size_t i; | |
2139 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2140 FreeList* fl = &_indexedFreeList[i]; | |
2141 if (PrintFLSStatistics > 1) { | |
2142 gclog_or_tty->print("size[%d] : ", i); | |
2143 } | |
2144 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); | |
2145 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); | |
0 | 2146 fl->set_beforeSweep(fl->count()); |
2147 fl->set_bfrSurp(fl->surplus()); | |
2148 } | |
2149 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent, | |
0 | 2150 inter_sweep_current, |
2151 inter_sweep_estimate, | |
2152 intra_sweep_estimate); | |
0 | 2153 } |
2154 | |
2155 void CompactibleFreeListSpace::setFLSurplus() { | |
2156 assert_locked(); | |
2157 size_t i; | |
2158 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2159 FreeList *fl = &_indexedFreeList[i]; | |
2160 fl->set_surplus(fl->count() - | |
2161 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent)); |
0 | 2162 } |
2163 } | |
2164 | |
2165 void CompactibleFreeListSpace::setFLHints() { | |
2166 assert_locked(); | |
2167 size_t i; | |
2168 size_t h = IndexSetSize; | |
2169 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { | |
2170 FreeList *fl = &_indexedFreeList[i]; | |
2171 fl->set_hint(h); | |
2172 if (fl->surplus() > 0) { | |
2173 h = i; | |
2174 } | |
2175 } | |
2176 } | |
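Because the loop above runs from the largest size down and assigns the hint before checking the current list's own surplus, every list ends up pointing at the nearest strictly larger size that had a surplus (or at IndexSetSize if there is none). A toy illustration of the same assignment, with made-up sizes and surpluses:

#include <cstdio>

int main() {
  const int IndexSetSize = 10;
  int surplus[IndexSetSize] = {0, 0, 0, 0, 0, 1, 0, 0, 1, 0};  // sizes 5 and 8 have surplus
  int hint[IndexSetSize];
  int h = IndexSetSize;                         // "no larger surplus" sentinel
  for (int i = IndexSetSize - 1; i != 0; i--) {
    hint[i] = h;                                // nearest larger size with surplus so far
    if (surplus[i] > 0) h = i;
  }
  for (int i = 1; i < IndexSetSize; i++) printf("hint[%d] = %d\n", i, hint[i]);
  return 0;                                     // e.g. hint[6] = 8, hint[4] = 5
}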
2177 | |
2178 void CompactibleFreeListSpace::clearFLCensus() { | |
2179 assert_locked(); | |
2180 int i; | |
2181 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2182 FreeList *fl = &_indexedFreeList[i]; | |
2183 fl->set_prevSweep(fl->count()); | |
2184 fl->set_coalBirths(0); | |
2185 fl->set_coalDeaths(0); | |
2186 fl->set_splitBirths(0); | |
2187 fl->set_splitDeaths(0); | |
2188 } | |
2189 } | |
2190 | |
2191 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { |
2192 if (PrintFLSStatistics > 0) { | |
2193 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict(); | |
2194 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT, | |
2195 largestAddr); | |
2196 } | |
0 | 2197 setFLSurplus(); |
2198 setFLHints(); | |
2199 if (PrintGC && PrintFLSCensus > 0) { | |
2200 printFLCensus(sweep_count); |
0 | 2201 } |
2202 clearFLCensus(); | |
2203 assert_locked(); | |
2204 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent); |
0 | 2205 } |
2206 | |
2207 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { | |
2208 if (size < SmallForDictionary) { | |
2209 FreeList *fl = &_indexedFreeList[size]; | |
2210 return (fl->coalDesired() < 0) || | |
2211 ((int)fl->count() > fl->coalDesired()); | |
2212 } else { | |
2213 return dictionary()->coalDictOverPopulated(size); | |
2214 } | |
2215 } | |
2216 | |
2217 void CompactibleFreeListSpace::smallCoalBirth(size_t size) { | |
2218 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2219 FreeList *fl = &_indexedFreeList[size]; | |
2220 fl->increment_coalBirths(); | |
2221 fl->increment_surplus(); | |
2222 } | |
2223 | |
2224 void CompactibleFreeListSpace::smallCoalDeath(size_t size) { | |
2225 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2226 FreeList *fl = &_indexedFreeList[size]; | |
2227 fl->increment_coalDeaths(); | |
2228 fl->decrement_surplus(); | |
2229 } | |
2230 | |
2231 void CompactibleFreeListSpace::coalBirth(size_t size) { | |
2232 if (size < SmallForDictionary) { | |
2233 smallCoalBirth(size); | |
2234 } else { | |
2235 dictionary()->dictCensusUpdate(size, | |
2236 false /* split */, | |
2237 true /* birth */); | |
2238 } | |
2239 } | |
2240 | |
2241 void CompactibleFreeListSpace::coalDeath(size_t size) { | |
2242 if (size < SmallForDictionary) { | |
2243 smallCoalDeath(size); | |
2244 } else { | |
2245 dictionary()->dictCensusUpdate(size, | |
2246 false /* split */, | |
2247 false /* birth */); | |
2248 } | |
2249 } | |
2250 | |
2251 void CompactibleFreeListSpace::smallSplitBirth(size_t size) { | |
2252 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2253 FreeList *fl = &_indexedFreeList[size]; | |
2254 fl->increment_splitBirths(); | |
2255 fl->increment_surplus(); | |
2256 } | |
2257 | |
2258 void CompactibleFreeListSpace::smallSplitDeath(size_t size) { | |
2259 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2260 FreeList *fl = &_indexedFreeList[size]; | |
2261 fl->increment_splitDeaths(); | |
2262 fl->decrement_surplus(); | |
2263 } | |
2264 | |
2265 void CompactibleFreeListSpace::splitBirth(size_t size) { | |
2266 if (size < SmallForDictionary) { | |
2267 smallSplitBirth(size); | |
2268 } else { | |
2269 dictionary()->dictCensusUpdate(size, | |
2270 true /* split */, | |
2271 true /* birth */); | |
2272 } | |
2273 } | |
2274 | |
2275 void CompactibleFreeListSpace::splitDeath(size_t size) { | |
2276 if (size < SmallForDictionary) { | |
2277 smallSplitDeath(size); | |
2278 } else { | |
2279 dictionary()->dictCensusUpdate(size, | |
2280 true /* split */, | |
2281 false /* birth */); | |
2282 } | |
2283 } | |
2284 | |
2285 void CompactibleFreeListSpace::split(size_t from, size_t to1) { | |
2286 size_t to2 = from - to1; | |
2287 splitDeath(from); | |
2288 splitBirth(to1); | |
2289 splitBirth(to2); | |
2290 } | |
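For the census bookkeeping above, a split is one death at the original size and a birth at each fragment size; for example, splitting a 20-word chunk into 8 + 12 decrements the 20-word count and increments the 8- and 12-word counts. A tiny sketch of that net effect, using toy sizes only:

#include <cstdio>
#include <map>

int main() {
  std::map<size_t, long> census;   // net count change per size after one split
  size_t from = 20, to1 = 8, to2 = from - to1;
  census[from] -= 1;               // splitDeath(from)
  census[to1]  += 1;               // splitBirth(to1)
  census[to2]  += 1;               // splitBirth(to2)
  for (const auto& e : census)
    printf("size %zu: net %+ld\n", e.first, e.second);
  return 0;
}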
2291 | |
2292 void CompactibleFreeListSpace::print() const { | |
2293 print_on(tty); |
0 | 2294 } |
2295 | |
2296 void CompactibleFreeListSpace::prepare_for_verify() { | |
2297 assert_locked(); | |
2298 repairLinearAllocationBlocks(); | |
2299 // Verify that the SpoolBlocks look like free blocks of | |
2300 // appropriate sizes... To be done ... | |
2301 } | |
2302 | |
2303 class VerifyAllBlksClosure: public BlkClosure { | |
2304 private: | |
0 | 2305 const CompactibleFreeListSpace* _sp; |
2306 const MemRegion _span; | |
2307 HeapWord* _last_addr; | |
2308 size_t _last_size; | |
2309 bool _last_was_obj; | |
2310 bool _last_was_live; | |
0 | 2311 |
2312 public: | |
2313 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, | |
2314 MemRegion span) : _sp(sp), _span(span), | |
2315 _last_addr(NULL), _last_size(0), | |
2316 _last_was_obj(false), _last_was_live(false) { } | |
0 | 2317 |
2318 virtual size_t do_blk(HeapWord* addr) { |
0 | 2319 size_t res; |
2320 bool was_obj = false; | |
2321 bool was_live = false; | |
0 | 2322 if (_sp->block_is_obj(addr)) { |
2323 was_obj = true; |
0 | 2324 oop p = oop(addr); |
2325 guarantee(p->is_oop(), "Should be an oop"); | |
2326 res = _sp->adjustObjectSize(p->size()); | |
2327 if (_sp->obj_is_alive(addr)) { | |
2328 was_live = true; |
0 | 2329 p->verify(); |
2330 } | |
2331 } else { | |
2332 FreeChunk* fc = (FreeChunk*)addr; | |
2333 res = fc->size(); | |
2334 if (FLSVerifyLists && !fc->cantCoalesce()) { | |
2335 guarantee(_sp->verifyChunkInFreeLists(fc), | |
2336 "Chunk should be on a free list"); | |
2337 } | |
2338 } | |
2339 if (res == 0) { | |
2340 gclog_or_tty->print_cr("Livelock: no rank reduction!"); | |
2341 gclog_or_tty->print_cr( | |
2342 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" | |
2343 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n", | |
2344 addr, res, was_obj ?"true":"false", was_live ?"true":"false", | |
2345 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false"); | |
2346 _sp->print_on(gclog_or_tty); | |
2347 guarantee(false, "Seppuku!"); | |
2348 } | |
2349 _last_addr = addr; | |
2350 _last_size = res; | |
2351 _last_was_obj = was_obj; | |
2352 _last_was_live = was_live; | |
0 | 2353 return res; |
2354 } | |
2355 }; | |
2356 | |
2357 class VerifyAllOopsClosure: public OopClosure { | |
2358 private: |
0 | 2359 const CMSCollector* _collector; |
2360 const CompactibleFreeListSpace* _sp; | |
2361 const MemRegion _span; | |
2362 const bool _past_remark; | |
2363 const CMSBitMap* _bit_map; | |
2364 | |
2365 protected: | |
2366 void do_oop(void* p, oop obj) { | |
2367 if (_span.contains(obj)) { // the interior oop points into CMS heap | |
2368 if (!_span.contains(p)) { // reference from outside CMS heap | |
2369 // Should be a valid object; the first disjunct below allows | |
2370 // us to sidestep an assertion in block_is_obj() that insists | |
2371 // that p be in _sp. Note that several generations (and spaces) | |
2372 // are spanned by _span (CMS heap) above. | |
2373 guarantee(!_sp->is_in_reserved(obj) || | |
2374 _sp->block_is_obj((HeapWord*)obj), | |
2375 "Should be an object"); | |
2376 guarantee(obj->is_oop(), "Should be an oop"); | |
2377 obj->verify(); | |
2378 if (_past_remark) { | |
2379 // Remark has been completed, the object should be marked | |
2380 _bit_map->isMarked((HeapWord*)obj); | |
2381 } | |
2382 } else { // reference within CMS heap | |
2383 if (_past_remark) { | |
2384 // Remark has been completed -- so the referent should have | |
2385 // been marked, if referring object is. | |
2386 if (_bit_map->isMarked(_collector->block_start(p))) { | |
2387 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); | |
2388 } | |
2389 } | |
2390 } | |
2391 } else if (_sp->is_in_reserved(p)) { | |
2392 // the reference is from FLS, and points out of FLS | |
2393 guarantee(obj->is_oop(), "Should be an oop"); | |
2394 obj->verify(); | |
2395 } | |
2396 } | |
2397 | |
2398 template <class T> void do_oop_work(T* p) { | |
2399 T heap_oop = oopDesc::load_heap_oop(p); | |
2400 if (!oopDesc::is_null(heap_oop)) { | |
2401 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
2402 do_oop(p, obj); | |
2403 } | |
2404 } | |
2405 | |
0 | 2406 public: |
2407 VerifyAllOopsClosure(const CMSCollector* collector, | |
2408 const CompactibleFreeListSpace* sp, MemRegion span, | |
2409 bool past_remark, CMSBitMap* bit_map) : | |
2410 OopClosure(), _collector(collector), _sp(sp), _span(span), | |
2411 _past_remark(past_remark), _bit_map(bit_map) { } | |
2412 | |
2413 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } | |
2414 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } | |
0 | 2415 }; |
2416 | |
2417 void CompactibleFreeListSpace::verify(bool ignored) const { | |
2418 assert_lock_strong(&_freelistLock); | |
2419 verify_objects_initialized(); | |
2420 MemRegion span = _collector->_span; | |
2421 bool past_remark = (_collector->abstract_state() == | |
2422 CMSCollector::Sweeping); | |
2423 | |
2424 ResourceMark rm; | |
2425 HandleMark hm; | |
2426 | |
2427 // Check integrity of CFL data structures | |
2428 _promoInfo.verify(); | |
2429 _dictionary->verify(); | |
2430 if (FLSVerifyIndexTable) { | |
2431 verifyIndexedFreeLists(); | |
2432 } | |
2433 // Check integrity of all objects and free blocks in space | |
2434 { | |
2435 VerifyAllBlksClosure cl(this, span); | |
2436 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const | |
2437 } | |
2438 // Check that all references in the heap to FLS | |
2439 // are to valid objects in FLS or that references in | |
2440 // FLS are to valid objects elsewhere in the heap | |
2441 if (FLSVerifyAllHeapReferences) | |
2442 { | |
2443 VerifyAllOopsClosure cl(_collector, this, span, past_remark, | |
2444 _collector->markBitMap()); | |
2445 CollectedHeap* ch = Universe::heap(); | |
2446 ch->oop_iterate(&cl); // all oops in generations | |
2447 ch->permanent_oop_iterate(&cl); // all oops in perm gen | |
2448 } | |
2449 | |
2450 if (VerifyObjectStartArray) { | |
2451 // Verify the block offset table | |
2452 _bt.verify(); | |
2453 } | |
2454 } | |
2455 | |
2456 #ifndef PRODUCT | |
2457 void CompactibleFreeListSpace::verifyFreeLists() const { | |
2458 if (FLSVerifyLists) { | |
2459 _dictionary->verify(); | |
2460 verifyIndexedFreeLists(); | |
2461 } else { | |
2462 if (FLSVerifyDictionary) { | |
2463 _dictionary->verify(); | |
2464 } | |
2465 if (FLSVerifyIndexTable) { | |
2466 verifyIndexedFreeLists(); | |
2467 } | |
2468 } | |
2469 } | |
2470 #endif | |
2471 | |
2472 void CompactibleFreeListSpace::verifyIndexedFreeLists() const { | |
2473 size_t i = 0; | |
2474 for (; i < MinChunkSize; i++) { | |
2475 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); | |
2476 } | |
2477 for (; i < IndexSetSize; i++) { | |
2478 verifyIndexedFreeList(i); | |
2479 } | |
2480 } | |
2481 | |
2482 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { | |
2483 FreeChunk* fc = _indexedFreeList[size].head(); | |
2484 FreeChunk* tail = _indexedFreeList[size].tail(); | |
2485 size_t num = _indexedFreeList[size].count(); | |
2486 size_t n = 0; | |
2487 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty"); | |
2488 for (; fc != NULL; fc = fc->next(), n++) { | |
0 | 2489 guarantee(fc->size() == size, "Size inconsistency"); |
2490 guarantee(fc->isFree(), "!free?"); | |
2491 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); | |
2492 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail"); | |
0 | 2493 } |
2494 guarantee(n == num, "Incorrect count"); | |
0 | 2495 } |
2496 | |
2497 #ifndef PRODUCT | |
2498 void CompactibleFreeListSpace::checkFreeListConsistency() const { | |
2499 assert(_dictionary->minSize() <= IndexSetSize, | |
2500 "Some sizes can't be allocated without recourse to" | |
2501 " linear allocation buffers"); | |
2502 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), | |
2503 "else MIN_TREE_CHUNK_SIZE is wrong"); | |
2504 assert((IndexSetStride == 2 && IndexSetStart == 2) || | |
2505 (IndexSetStride == 1 && IndexSetStart == 1), "just checking"); | |
2506 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0), | |
2507 "Some for-loops may be incorrectly initialized"); | |
2508 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), | |
2509 "For-loops that iterate over IndexSet with stride 2 may be wrong"); | |
2510 } | |
2511 #endif | |
2512 | |
2513 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { |
0 | 2514 assert_lock_strong(&_freelistLock); |
2515 FreeList total; | |
2516 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); | |
2517 FreeList::print_labels_on(gclog_or_tty, "size"); | |
0 | 2518 size_t totalFree = 0; |
2519 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2520 const FreeList *fl = &_indexedFreeList[i]; | |
2521 totalFree += fl->count() * fl->size(); | |
2522 if (i % (40*IndexSetStride) == 0) { | |
2523 FreeList::print_labels_on(gclog_or_tty, "size"); | |
2524 } | |
2525 fl->print_on(gclog_or_tty); | |
2526 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() ); | |
2527 total.set_surplus( total.surplus() + fl->surplus() ); | |
2528 total.set_desired( total.desired() + fl->desired() ); | |
2529 total.set_prevSweep( total.prevSweep() + fl->prevSweep() ); | |
2530 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep()); | |
2531 total.set_count( total.count() + fl->count() ); | |
2532 total.set_coalBirths( total.coalBirths() + fl->coalBirths() ); | |
2533 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() ); | |
2534 total.set_splitBirths(total.splitBirths() + fl->splitBirths()); | |
2535 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths()); | |
0 | 2536 } |
2537 total.print_on(gclog_or_tty, "TOTAL"); | |
2538 gclog_or_tty->print_cr("Total free in indexed lists " | |
2539 SIZE_FORMAT " words", totalFree); | |
0 | 2540 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", |
2541 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/ | |
2542 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0), | |
2543 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); | |
0 | 2544 _dictionary->printDictCensus(); |
2545 } | |
2546 | |
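The growth and deficit figures printed above are simple ratios: net births over the previous sweep's population, and the shortfall of the current count relative to the desired count. A worked example with made-up census totals:

#include <cstdio>

int main() {
  // Hypothetical per-sweep census counters, for illustration only.
  double splitBirths = 120, coalBirths = 30, splitDeaths = 80, coalDeaths = 20;
  double prevSweep = 1000, desired = 500, count = 450;
  double growth  = (splitBirths + coalBirths - splitDeaths - coalDeaths) /
                   (prevSweep != 0 ? prevSweep : 1.0);
  double deficit = (desired - count) / (desired != 0 ? desired : 1.0);
  printf("growth: %8.5f deficit: %8.5f\n", growth, deficit);  // 0.05000, 0.10000
  return 0;
}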
2547 /////////////////////////////////////////////////////////////////////////// | |
2548 // CFLS_LAB | |
2549 /////////////////////////////////////////////////////////////////////////// | |
2550 | |
2551 #define VECTOR_257(x) \ | |
2552 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \ | |
2553 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2554 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2555 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2556 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2557 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2558 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2559 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2560 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2561 x } | |
2562 | |
2563 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_ | |
2564 // OldPLABSize, whose static default is different; if overridden at the | |
2565 // command-line, this will get reinitialized via a call to | |
2566 // modify_initialization() below. | |
2567 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] = | |
2568 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim)); | |
2569 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0); | |
2570 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0); | |
0 | 2571 |
2572 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) : | |
2573 _cfls(cfls) | |
2574 { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2575 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above"); |
0 | 2576 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
2577 i < CompactibleFreeListSpace::IndexSetSize; | |
2578 i += CompactibleFreeListSpace::IndexSetStride) { | |
2579 _indexedFreeList[i].set_size(i); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2580 _num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2581 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2582 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2583 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2584 static bool _CFLS_LAB_modified = false; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2585 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2586 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2587 assert(!_CFLS_LAB_modified, "Call only once"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2588 _CFLS_LAB_modified = true; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2589 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2590 i < CompactibleFreeListSpace::IndexSetSize; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2591 i += CompactibleFreeListSpace::IndexSetStride) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2592 _blocks_to_claim[i].modify(n, wt, true /* force */); |
0 | 2593 } |
2594 } | |
2595 | |
2596 HeapWord* CFLS_LAB::alloc(size_t word_sz) { | |
2597 FreeChunk* res; | |
2598 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error"); |
0 | 2599 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) { |
2600 // This locking manages sync with other large object allocations. | |
2601 MutexLockerEx x(_cfls->parDictionaryAllocLock(), | |
2602 Mutex::_no_safepoint_check_flag); | |
2603 res = _cfls->getChunkFromDictionaryExact(word_sz); | |
2604 if (res == NULL) return NULL; | |
2605 } else { | |
2606 FreeList* fl = &_indexedFreeList[word_sz]; | |
2607 if (fl->count() == 0) { | |
2608 // Attempt to refill this local free list. | |
2609 get_from_global_pool(word_sz, fl); |
0 | 2610 // If it didn't work, give up. |
2611 if (fl->count() == 0) return NULL; | |
2612 } | |
2613 res = fl->getChunkAtHead(); | |
2614 assert(res != NULL, "Why was count non-zero?"); | |
2615 } | |
2616 res->markNotFree(); | |
2617 assert(!res->isFree(), "shouldn't be marked free"); | |
187 | 2618 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 2619 // mangle a just allocated object with a distinct pattern. |
2620 debug_only(res->mangleAllocated(word_sz)); | |
2621 return (HeapWord*)res; | |
2622 } | |
2623 | |
2624 // Get a chunk of blocks of the right size and update related | |
2625 // book-keeping stats | |
2626 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) { | |
2627 // Get the #blocks we want to claim | |
2628 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average(); | |
2629 assert(n_blks > 0, "Error"); | |
2630 assert(ResizePLAB || n_blks == OldPLABSize, "Error"); | |
2631 // In some cases, when the application has a phase change, | |
2632 // there may be a sudden and sharp shift in the object survival | |
2633 // profile, and updating the counts at the end of a scavenge | |
2634 // may not be quick enough, giving rise to large scavenge pauses | |
2635 // during these phase changes. It is beneficial to detect such | |
2636 // changes on-the-fly during a scavenge and avoid such a phase-change | |
2637 // pothole. The following code is a heuristic attempt to do that. | |
2638 // It is protected by a product flag until we have gained | |
2639 // enough experience with this heuristic and fine-tuned its behaviour. | |
2640 // WARNING: This might increase fragmentation if we overreact to | |
2641 // small spikes, so some kind of historical smoothing based on | |
2642 // previous experience with the greater reactivity might be useful. | |
2643 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by | |
2644 // default. | |
2645 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) { | |
2646 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks); | |
2647 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks; | |
2648 n_blks = MIN2(n_blks, CMSOldPLABMax); | |
2649 } | |
2650 assert(n_blks > 0, "Error"); | |
2651 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl); | |
2652 // Update stats table entry for this block size | |
2653 _num_blocks[word_sz] += fl->count(); | |
2654 } | |
2655 | |
2656 void CFLS_LAB::compute_desired_plab_size() { | |
2657 for (size_t i = CompactibleFreeListSpace::IndexSetStart; | |
0 | 2658 i < CompactibleFreeListSpace::IndexSetSize; |
2659 i += CompactibleFreeListSpace::IndexSetStride) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2660 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2661 "Counter inconsistency"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2662 if (_global_num_workers[i] > 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2663 // Need to smooth wrt historical average |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2664 if (ResizeOldPLAB) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2665 _blocks_to_claim[i].sample( |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2666 MAX2((size_t)CMSOldPLABMin, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2667 MIN2((size_t)CMSOldPLABMax, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2668 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills)))); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2669 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2670 // Reset counters for next round |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2671 _global_num_workers[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2672 _global_num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2673 if (PrintOldPLAB) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2674 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2675 } |
0 | 2676 } |
2677 } | |
2678 } | |
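// Illustrative arithmetic (not part of the original source): if, for block
// size i, the last scavenge recorded _global_num_blocks[i] = 960 blocks used
// by _global_num_workers[i] = 6 workers, and CMSOldPLABNumRefills is assumed
// to be 4, the raw per-refill demand is 960/(6*4) = 40 blocks; that value is
// clamped to [CMSOldPLABMin, CMSOldPLABMax] before being fed as a sample into
// the smoothed _blocks_to_claim[i] average above.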

void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
#define CFLS_LAB_PARALLEL_ACCESS 0
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
         "Will access uninitialized slot below");
#if CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
       i > 0;
       i -= CompactibleFreeListSpace::IndexSetStride) {
#else // CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
#endif // !CFLS_LAB_PARALLEL_ACCESS
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire = _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
#if CFLS_LAB_PARALLEL_ACCESS
        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
                        Mutex::_no_safepoint_check_flag);
#endif // CFLS_LAB_PARALLEL_ACCESS
        // Update global stats for the number of blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = FreeList();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
          tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i] = 0;
    }
  }
}
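// Illustrative example (not part of the original source): if a worker thread
// was handed _num_blocks[i] = 50 blocks of size i during the scavenge and is
// still holding num_retire = 8 of them unused at retire() time, 42 blocks are
// credited to _global_num_blocks[i], _global_num_workers[i] is bumped, and
// the 8 leftover chunks are prepended back onto the shared indexed free list
// for size i.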

void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
  {
    bool found;
    int k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        FreeList* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->splitDeaths() +
                             fl_for_cur_sz.count();
            gfl->set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->isFree(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                     ((fc == ffc) && ffc->isFree() &&
                      (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                     "Counting error");
              ffc->setSize(word_sz);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->linkNext(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
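  // Illustrative example (not part of the original source), with assumed
  // values: for word_sz = 8 and n = 64, the loop above first tries the
  // size-8 indexed free list (k = 1); failing that, and only if
  // CMSSplitIndexedFreeListBlocks is enabled, it tries the size-16 list
  // (k = 2), taking nn = MAX2(64/2, 1) = 32 chunks and splitting each in
  // two, and so on while k * word_sz stays below IndexSetSize.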
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                       _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->isFree(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
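    // Illustrative arithmetic (not part of the original source), with assumed
    // values: if the dictionary returns fc->size() = 1003 words for a request
    // of n = 100 blocks of word_sz = 10, then nn = 100 and rem = 3; if
    // MinChunkSize were larger than 3 words, n would drop to 99 and rem grow
    // to 13, and that remainder is later returned either to the dictionary
    // (when rem >= IndexSetSize) or to the indexed free list for size 13.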
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->linkNext(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->isFree(), "Error");
      fc->setSize(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->linkNext(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->setSize(word_sz);
  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  fc->linkNext(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
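// Illustrative summary (not part of the original source): par_get_chunk_of_blocks()
// first shops the indexed free lists for word_sz and, when
// CMSSplitIndexedFreeListBlocks is enabled, its small multiples; only if that
// fails does it take the dictionary path, carving one large chunk into blocks
// from the right so that concurrent walkers see either the original large free
// block or fully formed word_sz blocks, and publishing each new block header
// (with a storestore barrier) before the block-offset table is updated.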

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
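// Illustrative arithmetic (not part of the original source): with an assumed
// used_region() of 1,000,000 words and rescan_task_size() of 65,536 words,
// n_tasks = (1000000 + 65535)/65536 = 16, i.e. the ceiling of the quotient,
// so the last task covers a partial stride; the asserts above check exactly
// this covering property.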

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                                         CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
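// Illustrative note (not part of the original source): when low falls inside
// the generation's reserved span, it is first aligned down to a card boundary
// (e.g. with 512-byte cards and 8-byte HeapWords, a 64-word granule) so that
// every marking task begins at a card-aligned address; the task count is then
// the ceiling of span.word_size() / marking_task_size(), computed exactly as
// for rescan above.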