annotate: src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 5938:2c0751569716

7152954: G1: Native memory leak during full GCs
Summary: Add destructor to TruncatedSeq and call delete when necessary
Reviewed-by: johnc, tonyp

author:   brutisso
date:     Tue, 13 Mar 2012 21:12:53 +0100
parents:  441e946dc1af
children: b632e80fc9dc
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");
#define numQuanta(x,y) ((x+y-1)/y)
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}

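// Worked example of the computation above (an illustrative sketch, not
// authoritative: the concrete numbers are assumptions that vary by platform
// and flags). On a 64-bit VM with the default 8-byte object alignment,
// MinObjAlignmentInBytes == 8 and MinObjAlignment == 1 heap word. Assuming
// sizeof(FreeChunk) == 24 bytes:
//   numQuanta(24, 8) == (24 + 8 - 1) / 8 == 3
//   MinChunkSize     == 3 * 1           == 3 heap words (24 bytes)
// i.e. the smallest free chunk is just big enough to overlay a FreeChunk
// header. With -XX:ObjectAlignmentInBytes=16 (so MinObjAlignment == 2),
// numQuanta(24, 16) == 2 and MinChunkSize == 2 * 2 == 4 words (32 bytes).
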
// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold(). A contiguous space can only call it when
  // the compaction_top exceeds the current threshold but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block. The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists: all initially empty, heads set to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used. It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}
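
// Illustrative example of the size -> index mapping above (the concrete
// values are assumptions, not taken from this file): with a hypothetical
// IndexSetSize of 257 and 8-byte alignment (IndexSetStride == 1), a free
// chunk of 17 heap words lives on _indexedFreeList[17], and
// _indexedFreeList[17].size() == 17. Slots below IndexSetStart
// (== MinChunkSize) can hold no chunk, and with 16-byte alignment
// (IndexSetStride == 2) the odd slots likewise stay empty.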

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}
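
// Illustrative example of the routing in reset() above (hypothetical sizes,
// assuming an IndexSetSize of 257 words): if mr is 100 heap words, the single
// free chunk is small enough to be indexed and goes to _indexedFreeList[100];
// if mr were 10,000 words, it would exceed the indexed range and be returned
// to the dictionary instead.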

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}
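
// Worked example for free() (hypothetical numbers, purely illustrative):
// if the dictionary holds 10,000 words, the indexed lists 2,000 words, and
// the linAB 48 words, then free() == (10,000 + 2,000 + 48) * HeapWordSize,
// i.e. 12,048 words == 96,384 bytes on a 64-bit VM where HeapWordSize == 8.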

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  FreeList::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                             fc, (HeapWord*)fc + i,
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->reportStatistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

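// Illustrative example of the growth path above (hypothetical addresses and
// sizes, not from this file): if prevEnd is 0x1000000 and set_end() is called
// with value == 0x1010000, the space grew by 0x10000 bytes, so newFcSize ==
// 0x10000 / HeapWordSize == 8192 heap words on a 64-bit VM. That chunk is
// either handed whole to the linAB (non-adaptive free lists with an empty
// linAB) or added to the free lists, possibly coalescing, via
// addChunkToFreeListsAtEndRecordingStats().
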
class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)         \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                \
                                                  HeapWord* bottom,            \
                                                  HeapWord* top,               \
                                                  ClosureType* cl) {           \
  bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
  if (is_par) {                                                                \
    assert(SharedHeap::heap()->n_par_threads() ==                              \
           SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
  } else {                                                                     \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
  }                                                                            \
}                                                                              \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,            \
                                                      HeapWord* bottom,        \
                                                      HeapWord* top,           \
                                                      ClosureType* cl) {       \
  /* Skip parts that are before "mr", in case "block_start" sent us            \
     back too far. */                                                          \
  HeapWord* mr_start = mr.start();                                             \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);       \
  HeapWord* next = bottom + bot_size;                                          \
  while (next < mr_start) {                                                    \
    bottom = next;                                                             \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    next = bottom + bot_size;                                                  \
  }                                                                            \
                                                                               \
  while (bottom < top) {                                                       \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&               \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(      \
                    oop(bottom)) &&                                            \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                 \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                       \
      bottom += _cfls->adjustObjectSize(word_sz);                              \
    } else {                                                                   \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);           \
    }                                                                          \
  }                                                                            \
}                                                                              \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,          \
                                                        HeapWord* bottom,      \
                                                        HeapWord* top,         \
                                                        ClosureType* cl) {     \
  /* Skip parts that are before "mr", in case "block_start" sent us            \
     back too far. */                                                          \
  HeapWord* mr_start = mr.start();                                             \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
  HeapWord* next = bottom + bot_size;                                          \
  while (next < mr_start) {                                                    \
    bottom = next;                                                             \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    next = bottom + bot_size;                                                  \
  }                                                                            \
                                                                               \
  while (bottom < top) {                                                       \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&         \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(      \
                    oop(bottom)) &&                                            \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                 \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                       \
      bottom += _cfls->adjustObjectSize(word_sz);                              \
    } else {                                                                   \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);     \
    }                                                                          \
  }                                                                            \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
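
// A minimal sketch of the de-virtualization idiom used above (the names
// Base/Derived are hypothetical, not from this file): a call qualified with
// an explicit class name binds statically and bypasses the vtable.
//
//   struct Base           { virtual size_t block_size() { return 1; } };
//   struct Derived : Base { virtual size_t block_size() { return 2; } };
//   Derived d;
//   Base* p = &d;
//   p->block_size();                       // virtual dispatch via vtable
//   ((Derived*)p)->Derived::block_size();  // qualified: static dispatch
//
// Both calls return 2 here; the second simply avoids the vtable lookup,
// which is what _cfls->CompactibleFreeListSpace::block_size(bottom) does.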

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}
791 | |
792 // Apply the given closure to each oop in the space \intersect memory region. | |
793 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) { | |
794 assert_lock_strong(freelistLock()); | |
795 if (is_empty()) { | |
796 return; | |
797 } | |
798 MemRegion cur = MemRegion(bottom(), end()); | |
799 mr = mr.intersection(cur); | |
800 if (mr.is_empty()) { | |
801 return; | |
802 } | |
803 if (mr.equals(cur)) { | |
804 oop_iterate(cl); | |
805 return; | |
806 } | |
807 assert(mr.end() <= end(), "just took an intersection above"); | |
808 HeapWord* obj_addr = block_start(mr.start()); | |
809 HeapWord* t = mr.end(); | |
810 | |
811 SpaceMemRegionOopsIterClosure smr_blk(cl, mr); | |
812 if (block_is_obj(obj_addr)) { | |
813 // Handle first object specially. | |
814 oop obj = oop(obj_addr); | |
815 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk)); | |
816 } else { | |
817 FreeChunk* fc = (FreeChunk*)obj_addr; | |
818 obj_addr += fc->size(); | |
819 } | |
820 while (obj_addr < t) { | |
821 HeapWord* obj = obj_addr; | |
822 obj_addr += block_size(obj_addr); | |
823 // If "obj_addr" is not greater than t (the region's end), then
824 // the entire object "obj" is within the region.
825 if (obj_addr <= t) { | |
826 if (block_is_obj(obj)) { | |
827 oop(obj)->oop_iterate(cl); | |
828 } | |
829 } else { | |
830 // "obj" extends beyond end of region | |
831 if (block_is_obj(obj)) { | |
832 oop(obj)->oop_iterate(&smr_blk); | |
833 } | |
834 break; | |
835 } | |
836 } | |
837 } | |
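// Illustration of the boundary cases handled above (hypothetical layout):
//
//     |---obj A---|-----obj B-----|---obj C---|
//          ^mr.start()                ^mr.end()
//
// A is located via block_start(mr.start()) and, being only partly inside
// mr, is filtered through smr_blk; B lies entirely within mr and is
// iterated with the raw closure; C extends past mr.end(), so it is again
// filtered through smr_blk and the loop terminates.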
838 | |
839 // NOTE: In the following methods, in order to safely be able to | |
840 // apply the closure to an object, we need to be sure that the | |
841 // object has been initialized. We are guaranteed that an object | |
842 // is initialized if we are holding the Heap_lock with the | |
843 // world stopped. | |
844 void CompactibleFreeListSpace::verify_objects_initialized() const { | |
845 if (is_init_completed()) { | |
846 assert_locked_or_safepoint(Heap_lock); | |
847 if (Universe::is_fully_initialized()) { | |
848 guarantee(SafepointSynchronize::is_at_safepoint(), | |
849 "Required for objects to be initialized"); | |
850 } | |
851 } // else make a concession at vm start-up | |
852 } | |
853 | |
854 // Apply the given closure to each object in the space | |
855 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { | |
856 assert_lock_strong(freelistLock()); | |
857 NOT_PRODUCT(verify_objects_initialized()); | |
858 HeapWord *cur, *limit; | |
859 size_t curSize; | |
860 for (cur = bottom(), limit = end(); cur < limit; | |
861 cur += curSize) { | |
862 curSize = block_size(cur); | |
863 if (block_is_obj(cur)) { | |
864 blk->do_object(oop(cur)); | |
865 } | |
866 } | |
867 } | |
868 | |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
869 // Apply the given closure to each live object in the space |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
870 // The usage of CompactibleFreeListSpace |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
871 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
872 // objects in the space with references to objects that are no longer |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
873 // valid. For example, an object may reference another object |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
874 // that has already been sweep up (collected). This method uses |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
875 // obj_is_alive() to determine whether it is safe to apply the closure to |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
876 // an object. See obj_is_alive() for details on how liveness of an |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
877 // object is decided. |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
878 |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
879 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
880 assert_lock_strong(freelistLock()); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
881 NOT_PRODUCT(verify_objects_initialized()); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
882 HeapWord *cur, *limit; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
883 size_t curSize; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
884 for (cur = bottom(), limit = end(); cur < limit; |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
885 cur += curSize) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
886 curSize = block_size(cur); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
887 if (block_is_obj(cur) && obj_is_alive(cur)) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
888 blk->do_object(oop(cur)); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
889 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
890 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
891 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
360
diff
changeset
|
892 |
0 | 893 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
894 UpwardsObjectClosure* cl) { | |
1145 | 895 assert_locked(freelistLock());
0 | 896 NOT_PRODUCT(verify_objects_initialized()); |
897 Space::object_iterate_mem(mr, cl); | |
898 } | |
899 | |
900 // Callers of this iterator beware: The closure application should | |
901 // be robust in the face of uninitialized objects and should (always) | |
902 // return a correct size so that the next addr + size below gives us a | |
903 // valid block boundary. [See for instance, | |
904 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
905 // in ConcurrentMarkSweepGeneration.cpp.] | |
906 HeapWord* | |
907 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | |
908 assert_lock_strong(freelistLock()); | |
909 HeapWord *addr, *last; | |
910 size_t size; | |
911 for (addr = bottom(), last = end(); | |
912 addr < last; addr += size) { | |
913 FreeChunk* fc = (FreeChunk*)addr; | |
914 if (fc->isFree()) { | |
915 // Since we hold the free list lock, which protects direct | |
916 // allocation in this generation by mutators, a free object | |
917 // will remain free throughout this iteration code. | |
918 size = fc->size(); | |
919 } else { | |
920 // Note that the object need not necessarily be initialized, | |
921 // because (for instance) the free list lock does NOT protect | |
922 // object initialization. The closure application below must | |
923 // therefore be correct in the face of uninitialized objects. | |
924 size = cl->do_object_careful(oop(addr)); | |
925 if (size == 0) { | |
926 // An unparsable object was found; signal early termination.
927 return addr; | |
928 } | |
929 } | |
930 } | |
931 return NULL; | |
932 } | |
933 | |
934 // Callers of this iterator beware: The closure application should | |
935 // be robust in the face of uninitialized objects and should (always) | |
936 // return a correct size so that the next addr + size below gives us a | |
937 // valid block boundary. [See for instance, | |
938 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | |
939 // in ConcurrentMarkSweepGeneration.cpp.] | |
940 HeapWord* | |
941 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, | |
942 ObjectClosureCareful* cl) { | |
943 assert_lock_strong(freelistLock()); | |
944 // Can't use used_region() below because it may not necessarily | |
945 // be the same as [bottom(),end()); although we could | |
946 // use [used_region().start(),round_to(used_region().end(),CardSize)), | |
947 // that appears too cumbersome, so we just do the simpler check | |
948 // in the assertion below. | |
949 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), | |
950 "mr should be non-empty and within used space"); | |
951 HeapWord *addr, *end; | |
952 size_t size; | |
953 for (addr = block_start_careful(mr.start()), end = mr.end(); | |
954 addr < end; addr += size) { | |
955 FreeChunk* fc = (FreeChunk*)addr; | |
956 if (fc->isFree()) { | |
957 // Since we hold the free list lock, which protects direct | |
958 // allocation in this generation by mutators, a free object | |
959 // will remain free throughout this iteration code. | |
960 size = fc->size(); | |
961 } else { | |
962 // Note that the object need not necessarily be initialized, | |
963 // because (for instance) the free list lock does NOT protect | |
964 // object initialization. The closure application below must | |
965 // therefore be correct in the face of uninitialized objects. | |
966 size = cl->do_object_careful_m(oop(addr), mr); | |
967 if (size == 0) { | |
968 // An unparsable object was found; signal early termination.
969 return addr; | |
970 } | |
971 } | |
972 } | |
973 return NULL; | |
974 } | |
975 | |
976 | |
342 | 977 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
0 | 978 NOT_PRODUCT(verify_objects_initialized()); |
979 return _bt.block_start(p); | |
980 } | |
981 | |
982 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { | |
983 return _bt.block_start_careful(p); | |
984 } | |
985 | |
986 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { | |
987 NOT_PRODUCT(verify_objects_initialized()); | |
988 // This must be volatile, or else there is a danger that the compiler | |
989 // will compile the code below into a sometimes-infinite loop, by keeping | |
990 // the value read the first time in a register. | |
991 while (true) { | |
992 // We must do this until we get a consistent view of the object. | |
187 | 993 if (FreeChunk::indicatesFreeChunk(p)) { |
994 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
995 size_t res = fc->size(); | |
996 // If the object is still a free chunk, return the size, else it | |
997 // has been allocated so try again. | |
998 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 999 assert(res != 0, "Block size should not be 0"); |
1000 return res; | |
1001 } | |
187 | 1002 } else { |
1003 // must read from what 'p' points to in each loop. | |
1004 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
1005 if (k != NULL) { | |
1716 | 1006 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
187 | 1007 oop o = (oop)p; |
1008 assert(o->is_parsable(), "Should be parsable"); | |
1009 assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); | |
1010 size_t res = o->size_given_klass(k->klass_part()); | |
1011 res = adjustObjectSize(res); | |
1012 assert(res != 0, "Block size should not be 0"); | |
1013 return res; | |
1014 } | |
0 | 1015 } |
1016 } | |
1017 } | |
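// The read-recheck above is an instance of a generic read-validate-retry
// pattern for lock-free readers; a hedged sketch of its shape (the names
// looks_free/read_*_size are illustrative only):
//
//   while (true) {
//     if (looks_free(p)) {
//       size_t sz = read_free_size(p);
//       if (looks_free(p)) return sz;  // still free: sz was consistent
//     } else {
//       size_t sz = read_object_size(p);
//       if (sz != 0) return sz;        // object fully initialized
//     }
//     // state changed under us; retry
//   }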
1018 | |
1019 // A variant of the above that uses the Printezis bits for | |
1020 // unparsable but allocated objects. This avoids any possible | |
1021 // stalls waiting for mutators to initialize objects, and is | |
1022 // thus potentially faster than the variant above. However, | |
1023 // this variant may return a zero size for a block that is | |
1024 // under mutation and for which a consistent size cannot be | |
1025 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). | |
1026 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, | |
1027 const CMSCollector* c) | |
1028 const { | |
1029 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1030 // This must be volatile, or else there is a danger that the compiler | |
1031 // will compile the code below into a sometimes-infinite loop, by keeping | |
1032 // the value read the first time in a register. | |
1033 DEBUG_ONLY(uint loops = 0;) | |
1034 while (true) { | |
1035 // We must do this until we get a consistent view of the object. | |
187 | 1036 if (FreeChunk::indicatesFreeChunk(p)) { |
1037 volatile FreeChunk* fc = (volatile FreeChunk*)p; | |
1038 size_t res = fc->size(); | |
1039 if (FreeChunk::indicatesFreeChunk(p)) { | |
0 | 1040 assert(res != 0, "Block size should not be 0"); |
1041 assert(loops == 0, "Should be 0"); | |
1042 return res; | |
1043 } | |
1044 } else { | |
187 | 1045 // must read from what 'p' points to in each loop. |
1046 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); | |
2226 | 1047 // We trust the size of any object that has a non-NULL
1048 // klass and (for those in the perm gen) is parsable
1049 // -- irrespective of its conc_safe-ty.
1050 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
187 | 1051 assert(k->is_oop(), "Should really be klass oop."); |
1052 oop o = (oop)p; | |
1053 assert(o->is_oop(), "Should be an oop"); | |
1054 size_t res = o->size_given_klass(k->klass_part()); | |
1055 res = adjustObjectSize(res); | |
1056 assert(res != 0, "Block size should not be 0"); | |
1057 return res; | |
1058 } else { | |
2226 | 1059 // May return 0 if P-bits not present.
187 | 1060 return c->block_size_if_printezis_bits(p); |
1061 } | |
0 | 1062 } |
1063 assert(loops == 0, "Can loop at most once"); | |
1064 DEBUG_ONLY(loops++;) | |
1065 } | |
1066 } | |
1067 | |
1068 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { | |
1069 NOT_PRODUCT(verify_objects_initialized()); | |
1070 assert(MemRegion(bottom(), end()).contains(p), "p not in space"); | |
1071 FreeChunk* fc = (FreeChunk*)p; | |
1072 if (fc->isFree()) { | |
1073 return fc->size(); | |
1074 } else { | |
1075 // Ignore mark word because this may be a recently promoted | |
1076 // object whose mark word is used to chain together grey | |
1077 // objects (the last one would have a null value). | |
1078 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1079 return adjustObjectSize(oop(p)->size()); | |
1080 } | |
1081 } | |
1082 | |
1083 // This implementation assumes that the property of "being an object" is | |
1084 // stable. But being a free chunk may not be (because of parallel | |
1085 // promotion).
1086 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { | |
1087 FreeChunk* fc = (FreeChunk*)p; | |
1088 assert(is_in_reserved(p), "Should be in space"); | |
1089 // When doing a mark-sweep-compact of the CMS generation, this | |
1090 // assertion may fail because prepare_for_compaction() uses | |
1091 // space that is garbage to maintain information on ranges of | |
1092 // live objects so that these live ranges can be moved as a whole. | |
1093 // Comment out this assertion until that problem can be solved
1094 // (i.e., the block start calculation may look at objects
1095 // at addresses below "p" in finding the object that contains "p",
1096 // and those objects (if garbage) may have been modified to hold
1097 // live range information).
1833 | 1098 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1099 //        "Should be a block boundary");
187 | 1100 if (FreeChunk::indicatesFreeChunk(p)) return false; |
1101 klassOop k = oop(p)->klass_or_null(); | |
0 | 1102 if (k != NULL) { |
1103 // Ignore mark word because it may have been used to | |
1104 // chain together promoted objects (the last one | |
1105 // would have a null value). | |
1106 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1107 return true; | |
1108 } else { | |
1109 return false; // Was not an object at the start of collection. | |
1110 } | |
1111 } | |
1112 | |
1113 // Check if the object is alive. This fact is checked either by consulting | |
1114 // the main marking bitmap in the sweeping phase or, if it's a permanent | |
1115 // generation and we're not in the sweeping phase, by checking the | |
1116 // perm_gen_verify_bit_map where we store the "deadness" information if | |
1117 // we did not sweep the perm gen in the most recent previous GC cycle. | |
1118 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { | |
1959 | 1119 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1120 "Else races are possible");
1951 | 1121 assert(block_is_obj(p), "The address should point to an object");
0 | 1122 |
1123 // If we're sweeping, we use object liveness information from the main bit map | |
1124 // for both perm gen and old gen. | |
1125 // We don't need to lock the bitmap (live_map or dead_map below), because | |
1126 // EITHER we are in the middle of the sweeping phase, and the | |
1127 // main marking bit map (live_map below) is locked, | |
1128 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) | |
1129 // is stable, because it's mutated only in the sweeping phase. | |
1951 | 1130 // NOTE: This method is also used by jmap where, if class unloading is
1131 // off, the results can return "false" for legitimate perm objects,
1132 // when we are not in the midst of a sweeping phase, which can result
1133 // in jmap not reporting certain perm gen objects. This will be moot
1134 // if/when the perm gen goes away in the future.
0 | 1135 if (_collector->abstract_state() == CMSCollector::Sweeping) { |
1136 CMSBitMap* live_map = _collector->markBitMap(); | |
1951 | 1137 return live_map->par_isMarked((HeapWord*) p);
0 | 1138 } else { |
1139 // If we're not currently sweeping and we haven't swept the perm gen in | |
1140 // the previous concurrent cycle then we may have dead but unswept objects | |
1141 // in the perm gen. In this case, we use the "deadness" information | |
1142 // that we had saved in perm_gen_verify_bit_map at the last sweep. | |
1143 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { | |
1144 if (_collector->verifying()) { | |
1145 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); | |
1146 // Object is marked in the dead_map bitmap at the previous sweep | |
1147 // when we know that it's dead; if the bitmap is not allocated then | |
1148 // the object is alive. | |
1149 return (dead_map->sizeInBits() == 0) // bit_map has been allocated | |
1150 || !dead_map->par_isMarked((HeapWord*) p); | |
1151 } else { | |
1152 return false; // We can't say for sure if it's live, so we say that it's dead. | |
1153 } | |
1154 } | |
1155 } | |
1156 return true; | |
1157 } | |
1158 | |
1159 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { | |
1160 FreeChunk* fc = (FreeChunk*)p; | |
1161 assert(is_in_reserved(p), "Should be in space"); | |
1162 assert(_bt.block_start(p) == p, "Should be a block boundary"); | |
1163 if (!fc->isFree()) { | |
1164 // Ignore mark word because it may have been used to | |
1165 // chain together promoted objects (the last one | |
1166 // would have a null value). | |
1167 assert(oop(p)->is_oop(true), "Should be an oop"); | |
1168 return true; | |
1169 } | |
1170 return false; | |
1171 } | |
1172 | |
1173 // "MT-safe but not guaranteed MT-precise" (TM); you may get an | |
1174 // approximate answer if you don't hold the freelistlock when you call this. | |
1175 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { | |
1176 size_t size = 0; | |
1177 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
1178 debug_only( | |
1179 // We may be calling here without the lock in which case we | |
1180 // won't do this modest sanity check. | |
1181 if (freelistLock()->owned_by_self()) { | |
1182 size_t total_list_size = 0; | |
1183 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; | |
1184 fc = fc->next()) { | |
1185 total_list_size += i; | |
1186 } | |
1187 assert(total_list_size == i * _indexedFreeList[i].count(), | |
1188 "Count in list is incorrect"); | |
1189 } | |
1190 ) | |
1191 size += i * _indexedFreeList[i].count(); | |
1192 } | |
1193 return size; | |
1194 } | |
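// Worked example with hypothetical counts: if _indexedFreeList[4] holds
// 10 chunks and _indexedFreeList[8] holds 3, the loop above contributes
// 4*10 + 8*3 = 64 heap words; chunks resident in the dictionary are
// deliberately not counted here.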
1195 | |
1196 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { | |
1197 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
1198 return allocate(size); | |
1199 } | |
1200 | |
1201 HeapWord* | |
1202 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { | |
1203 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); | |
1204 } | |
1205 | |
1206 HeapWord* CompactibleFreeListSpace::allocate(size_t size) { | |
1207 assert_lock_strong(freelistLock()); | |
1208 HeapWord* res = NULL; | |
1209 assert(size == adjustObjectSize(size), | |
1210 "use adjustObjectSize() before calling into allocate()"); | |
1211 | |
1212 if (_adaptive_freelists) { | |
1213 res = allocate_adaptive_freelists(size); | |
1214 } else { // non-adaptive free lists | |
1215 res = allocate_non_adaptive_freelists(size); | |
1216 } | |
1217 | |
1218 if (res != NULL) { | |
1219 // check that res does lie in this space! | |
1220 assert(is_in_reserved(res), "Not in this space!"); | |
1221 assert(is_aligned((void*)res), "alignment check"); | |
1222 | |
1223 FreeChunk* fc = (FreeChunk*)res; | |
1224 fc->markNotFree(); | |
1225 assert(!fc->isFree(), "shouldn't be marked free"); | |
187 | 1226 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized"); |
0 | 1227 // Verify that the block offset table shows this to |
1228 // be a single block, but not one which is unallocated. | |
1229 _bt.verify_single_block(res, size); | |
1230 _bt.verify_not_unallocated(res, size); | |
1231 // mangle a just allocated object with a distinct pattern. | |
1232 debug_only(fc->mangleAllocated(size)); | |
1233 } | |
1234 | |
1235 return res; | |
1236 } | |
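// A hedged sketch of the expected calling convention (hypothetical
// caller): the size is pre-adjusted and the lock held, exactly as the
// asserts above require:
//
//   size_t sz = space->adjustObjectSize(word_size);
//   MutexLockerEx ml(space->freelistLock(),
//                    Mutex::_no_safepoint_check_flag);
//   HeapWord* mem = space->allocate(sz);   // NULL on failure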
1237 | |
1238 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { | |
1239 HeapWord* res = NULL; | |
1240 // try and use linear allocation for smaller blocks | |
1241 if (size < _smallLinearAllocBlock._allocation_size_limit) { | |
1242 // if successful, the following also adjusts block offset table | |
1243 res = getChunkFromSmallLinearAllocBlock(size); | |
1244 } | |
1245 // Else triage to indexed lists for smaller sizes | |
1246 if (res == NULL) { | |
1247 if (size < SmallForDictionary) { | |
1248 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1249 } else { | |
1250 // else get it from the big dictionary; if even this doesn't | |
1251 // work we are out of luck. | |
1252 res = (HeapWord*)getChunkFromDictionaryExact(size); | |
1253 } | |
1254 } | |
1255 | |
1256 return res; | |
1257 } | |
1258 | |
1259 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { | |
1260 assert_lock_strong(freelistLock()); | |
1261 HeapWord* res = NULL; | |
1262 assert(size == adjustObjectSize(size), | |
1263 "use adjustObjectSize() before calling into allocate()"); | |
1264 | |
1265 // Strategy | |
1266 // if small | |
1267 // exact size from small object indexed list if small | |
1268 // small or large linear allocation block (linAB) as appropriate | |
1269 // take from lists of greater sized chunks | |
1270 // else | |
1271 // dictionary | |
1272 // small or large linear allocation block if it has the space | |
1273 // Try allocating exact size from indexTable first | |
1274 if (size < IndexSetSize) { | |
1275 res = (HeapWord*) getChunkFromIndexedFreeList(size); | |
1276 if(res != NULL) { | |
1277 assert(res != (HeapWord*)_indexedFreeList[size].head(), | |
1278 "Not removed from free list"); | |
1279 // no block offset table adjustment is necessary on blocks in | |
1280 // the indexed lists. | |
1281 | |
1282 // Try allocating from the small LinAB | |
1283 } else if (size < _smallLinearAllocBlock._allocation_size_limit && | |
1284 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { | |
1285 // if successful, the above also adjusts block offset table | |
1286 // Note that this call will refill the LinAB to | |
1287 // satisfy the request. This behavior differs from
1288 // that of evm.
1289 // Don't record chunk off a LinAB? smallSplitBirth(size); | |
1290 } else { | |
1291 // Raid the exact free lists larger than size, even if they are not | |
1292 // overpopulated. | |
1293 res = (HeapWord*) getChunkFromGreater(size); | |
1294 } | |
1295 } else { | |
1296 // Big objects get allocated directly from the dictionary. | |
1297 res = (HeapWord*) getChunkFromDictionaryExact(size); | |
1298 if (res == NULL) { | |
1299 // Try hard not to fail since an allocation failure will likely | |
1300 // trigger a synchronous GC. Try to get the space from the | |
1301 // allocation blocks. | |
1302 res = getChunkFromSmallLinearAllocBlockRemainder(size); | |
1303 } | |
1304 } | |
1305 | |
1306 return res; | |
1307 } | |
1308 | |
1309 // A worst-case estimate of the space required (in HeapWords) to expand the heap | |
1310 // when promoting an object of the given size.
1311 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { | |
1312 // Depending on the object size, expansion may require refilling either a | |
1313 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize | |
1314 // is added because the dictionary may over-allocate to avoid fragmentation. | |
1315 size_t space = obj_size; | |
1316 if (!_adaptive_freelists) { | |
1317 space = MAX2(space, _smallLinearAllocBlock._refillSize); | |
1318 } | |
1319 space += _promoInfo.refillSize() + 2 * MinChunkSize; | |
1320 return space; | |
1321 } | |
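// Worked instance with hypothetical values: for obj_size = 100 words,
// non-adaptive free lists with _refillSize = 256, a promotion-info
// refillSize() of 128, and MinChunkSize = 4, the estimate is
// MAX2(100, 256) + 128 + 2*4 = 392 words.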
1322 | |
1323 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { | |
1324 FreeChunk* ret; | |
1325 | |
1326 assert(numWords >= MinChunkSize, "Size is less than minimum"); | |
1327 assert(linearAllocationWouldFail() || bestFitFirst(), | |
1328 "Should not be here"); | |
1329 | |
1330 size_t i; | |
1331 size_t currSize = numWords + MinChunkSize; | |
1332 assert(currSize % MinObjAlignment == 0, "currSize should be aligned"); | |
1333 for (i = currSize; i < IndexSetSize; i += IndexSetStride) { | |
1334 FreeList* fl = &_indexedFreeList[i]; | |
1335 if (fl->head()) { | |
1336 ret = getFromListGreater(fl, numWords); | |
1337 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1338 return ret; | |
1339 } | |
1340 } | |
1341 | |
1342 currSize = MAX2((size_t)SmallForDictionary, | |
1343 (size_t)(numWords + MinChunkSize)); | |
1344 | |
1345 /* Try to get a chunk that satisfies request, while avoiding | |
1346 fragmentation that can't be handled. */ | |
1347 { | |
1348 ret = dictionary()->getChunk(currSize); | |
1349 if (ret != NULL) { | |
1350 assert(ret->size() - numWords >= MinChunkSize, | |
1351 "Chunk is too small"); | |
1352 _bt.allocated((HeapWord*)ret, ret->size()); | |
1353 /* Carve returned chunk. */ | |
1354 (void) splitChunkAndReturnRemainder(ret, numWords); | |
1355 /* Label this as no longer a free chunk. */ | |
1356 assert(ret->isFree(), "This chunk should be free"); | |
1357 ret->linkPrev(NULL); | |
1358 } | |
1359 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); | |
1360 return ret; | |
1361 } | |
1362 ShouldNotReachHere(); | |
1363 } | |
1364 | |
4024 | 1365 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
0 | 1366 assert(fc->size() < IndexSetSize, "Size of chunk is too large"); |
1367 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); | |
1368 } | |
1369 | |
4024 | 1370 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1371 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1372 (_smallLinearAllocBlock._word_size == fc->size()),
1373 "Linear allocation block shows incorrect size");
1374 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1375 (_smallLinearAllocBlock._word_size == fc->size()));
1376 }
1377
1378 // Check if the purported free chunk is present as a linear
1379 // allocation block, in the size-indexed table of (smaller) free blocks,
1380 // or among the larger free blocks kept in the binary tree dictionary.
0 | 1381 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
4024 | 1382 if (verify_chunk_is_linear_alloc_block(fc)) {
1383 return true;
1384 } else if (fc->size() < IndexSetSize) {
1385 return verifyChunkInIndexedFreeLists(fc);
1386 } else {
1388 } | |
1389 } | |
1390 | |
1391 #ifndef PRODUCT | |
1392 void CompactibleFreeListSpace::assert_locked() const { | |
1393 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); | |
1394 } | |
1145 | 1395
1396 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1397 CMSLockVerifier::assert_locked(lock);
1398 }
0 | 1399 #endif |
1400 | |
1401 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { | |
1402 // In the parallel case, the main thread holds the free list lock | |
1403 // on behalf of the parallel threads.
1404 FreeChunk* fc; | |
1405 { | |
1406 // If GC is parallel, this might be called by several threads. | |
1407 // This should be rare enough that the locking overhead won't affect | |
1408 // the sequential code. | |
1409 MutexLockerEx x(parDictionaryAllocLock(), | |
1410 Mutex::_no_safepoint_check_flag); | |
1411 fc = getChunkFromDictionary(size); | |
1412 } | |
1413 if (fc != NULL) { | |
1414 fc->dontCoalesce(); | |
1415 assert(fc->isFree(), "Should be free, but not coalescable"); | |
1416 // Verify that the block offset table shows this to | |
1417 // be a single block, but not one which is unallocated. | |
1418 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1419 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); | |
1420 } | |
1421 return fc; | |
1422 } | |
1423 | |
113 | 1424 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
0 | 1425 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1426 assert_locked(); | |
1427 | |
1428 // if we are tracking promotions, then first ensure space for | |
1429 // promotion (including spooling space for saving header if necessary). | |
1430 // then allocate and copy, then track promoted info if needed. | |
1431 // When tracking (see PromotionInfo::track()), the mark word may | |
1432 // be displaced and in this case restoration of the mark word | |
1433 // occurs in the (oop_since_save_marks_)iterate phase. | |
1434 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { | |
1435 return NULL; | |
1436 } | |
1437 // Call the allocate(size_t, bool) form directly to avoid the | |
1438 // additional call through the allocate(size_t) form. Having | |
1439 // the compiler inline the call is problematic because allocate(size_t)
1440 // is a virtual method. | |
1441 HeapWord* res = allocate(adjustObjectSize(obj_size)); | |
1442 if (res != NULL) { | |
1443 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); | |
1444 // if we should be tracking promotions, do so. | |
1445 if (_promoInfo.tracking()) { | |
1446 _promoInfo.track((PromotedObject*)res); | |
1447 } | |
1448 } | |
1449 return oop(res); | |
1450 } | |
1451 | |
1452 HeapWord* | |
1453 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { | |
1454 assert_locked(); | |
1455 assert(size >= MinChunkSize, "minimum chunk size"); | |
1456 assert(size < _smallLinearAllocBlock._allocation_size_limit, | |
1457 "maximum from smallLinearAllocBlock"); | |
1458 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); | |
1459 } | |
1460 | |
1461 HeapWord* | |
1462 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, | |
1463 size_t size) { | |
1464 assert_locked(); | |
1465 assert(size >= MinChunkSize, "too small"); | |
1466 HeapWord* res = NULL; | |
1467 // Try to do linear allocation from blk, making sure first that the block is non-empty.
1468 if (blk->_word_size == 0) { | |
1469 // We have probably been unable to fill this either in the prologue or | |
1470 // when it was exhausted at the last linear allocation. Bail out until | |
1471 // next time. | |
1472 assert(blk->_ptr == NULL, "consistency check"); | |
1473 return NULL; | |
1474 } | |
1475 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); | |
1476 res = getChunkFromLinearAllocBlockRemainder(blk, size); | |
1477 if (res != NULL) return res; | |
1478 | |
1479 // about to exhaust this linear allocation block | |
1480 if (blk->_word_size == size) { // exactly satisfied | |
1481 res = blk->_ptr; | |
1482 _bt.allocated(res, blk->_word_size); | |
1483 } else if (size + MinChunkSize <= blk->_refillSize) { | |
1145 | 1484 size_t sz = blk->_word_size;
0 | 1485 // Update _unallocated_block if the size is such that the chunk would be
1486 // returned to the indexed free list. All other chunks in the indexed
1487 // free lists are allocated from the dictionary, so _unallocated_block
1488 // has already been adjusted for them. Do it here so that the cost is
1489 // incurred once for all chunks added back to the indexed free lists.
1145 | 1490 if (sz < SmallForDictionary) {
1491 _bt.allocated(blk->_ptr, sz);
0 | 1492 } |
1493 // Return the chunk that isn't big enough, and then refill below. | |
1145 | 1494 addChunkToFreeLists(blk->_ptr, sz);
1495 splitBirth(sz);
0 | 1496 // Don't keep statistics on adding back chunk from a LinAB. |
1497 } else { | |
1498 // A refilled block would not satisfy the request. | |
1499 return NULL; | |
1500 } | |
1501 | |
1502 blk->_ptr = NULL; blk->_word_size = 0; | |
1503 refillLinearAllocBlock(blk); | |
1504 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, | |
1505 "block was replenished"); | |
1506 if (res != NULL) { | |
1507 splitBirth(size); | |
1508 repairLinearAllocBlock(blk); | |
1509 } else if (blk->_ptr != NULL) { | |
1510 res = blk->_ptr; | |
1511 size_t blk_size = blk->_word_size; | |
1512 blk->_word_size -= size; | |
1513 blk->_ptr += size; | |
1514 splitBirth(size); | |
1515 repairLinearAllocBlock(blk); | |
1516 // Update BOT last so that other (parallel) GC threads see a consistent | |
1517 // view of the BOT and free blocks. | |
1518 // Above must occur before BOT is updated below. | |
1716 | 1519 OrderAccess::storestore();
0 | 1520 _bt.split_block(res, blk_size, size); // adjust block offset table |
1521 } | |
1522 return res; | |
1523 } | |
1524 | |
1525 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( | |
1526 LinearAllocBlock* blk, | |
1527 size_t size) { | |
1528 assert_locked(); | |
1529 assert(size >= MinChunkSize, "too small"); | |
1530 | |
1531 HeapWord* res = NULL; | |
1532 // This is the common case. Keep it simple. | |
1533 if (blk->_word_size >= size + MinChunkSize) { | |
1534 assert(blk->_ptr != NULL, "consistency check"); | |
1535 res = blk->_ptr; | |
1536 // Note that the BOT is up-to-date for the linAB before allocation. It | |
1537 // indicates the start of the linAB. The split_block() updates the | |
1538 // BOT for the linAB after the allocation (indicates the start of the | |
1539 // next chunk to be allocated). | |
1540 size_t blk_size = blk->_word_size; | |
1541 blk->_word_size -= size; | |
1542 blk->_ptr += size; | |
1543 splitBirth(size); | |
1544 repairLinearAllocBlock(blk); | |
1545 // Update BOT last so that other (parallel) GC threads see a consistent | |
1546 // view of the BOT and free blocks. | |
1547 // Above must occur before BOT is updated below. | |
1716 | 1548 OrderAccess::storestore();
0 | 1549 _bt.split_block(res, blk_size, size); // adjust block offset table |
1550 _bt.allocated(res, size); | |
1551 } | |
1552 return res; | |
1553 } | |
1554 | |
1555 FreeChunk* | |
1556 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { | |
1557 assert_locked(); | |
1558 assert(size < SmallForDictionary, "just checking"); | |
1559 FreeChunk* res; | |
1560 res = _indexedFreeList[size].getChunkAtHead(); | |
1561 if (res == NULL) { | |
1562 res = getChunkFromIndexedFreeListHelper(size); | |
1563 } | |
1564 _bt.verify_not_unallocated((HeapWord*) res, size); | |
1145 | 1565 assert(res == NULL || res->size() == size, "Incorrect block size");
0 | 1566 return res; |
1567 } | |
1568 | |
1569 FreeChunk* | |
1145 | 1570 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1571                                                              bool replenish) {
0 | 1572 assert_locked(); |
1573 FreeChunk* fc = NULL; | |
1574 if (size < SmallForDictionary) { | |
1575 assert(_indexedFreeList[size].head() == NULL || | |
1576 _indexedFreeList[size].surplus() <= 0, | |
1577 "List for this size should be empty or under populated"); | |
1578 // Try best fit in exact lists before replenishing the list | |
1579 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { | |
1580 // Replenish list. | |
1581 // | |
1582 // Things tried that failed. | |
1583 // Tried allocating out of the two LinAB's first before | |
1584 // replenishing lists. | |
1585 // Tried small linAB of size 256 (size in indexed list) | |
1586 // and replenishing indexed lists from the small linAB. | |
1587 // | |
1588 FreeChunk* newFc = NULL; | |
1145 | 1589 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
0 | 1590 if (replenish_size < SmallForDictionary) { |
1591 // Do not replenish from an underpopulated size. | |
1592 if (_indexedFreeList[replenish_size].surplus() > 0 && | |
1593 _indexedFreeList[replenish_size].head() != NULL) { | |
1145 | 1594 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
1595 } else if (bestFitFirst()) {
0 | 1596 newFc = bestFitSmall(replenish_size); |
1597 } | |
1598 } | |
1145 | 1599 if (newFc == NULL && replenish_size > size) {
0 | 1600 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); |
1145 | 1601 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
0 | 1602 } |
1145 | 1603 // Note: the split-death stats update for the block obtained above
1604 // is recorded below, precisely when we know we are actually going
1605 // to split it into more than one piece.
0 | 1606 if (newFc != NULL) {
1145 | 1607 if (replenish || CMSReplenishIntermediate) {
1608 // Replenish this list and return one block to caller.
1609 size_t i;
1610 FreeChunk *curFc, *nextFc;
1611 size_t num_blk = newFc->size() / size;
1612 assert(num_blk >= 1, "Smaller than requested?");
1613 assert(newFc->size() % size == 0, "Should be integral multiple of request");
1614 if (num_blk > 1) {
1615 // we are sure we will be splitting the block just obtained
1616 // into multiple pieces; record the split-death of the original
1617 splitDeath(replenish_size);
1618 }
1619 // carve up and link blocks 0, ..., num_blk - 2
1620 // The last chunk is not added to the lists but is returned as the
1621 // free chunk.
1622 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1623 i = 0;
1624 i < (num_blk - 1);
1625 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1626 i++) {
1627 curFc->setSize(size);
1628 // Don't record this as a return in order to try and
1629 // determine the "returns" from a GC.
1630 _bt.verify_not_unallocated((HeapWord*) curFc, size);
1631 _indexedFreeList[size].returnChunkAtTail(curFc, false);
1632 _bt.mark_block((HeapWord*)curFc, size);
1633 splitBirth(size);
1634 // Don't record the initial population of the indexed list
1635 // as a split birth.
1636 }
1637
1638 // check that the arithmetic was OK above
1639 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1640 "inconsistency in carving newFc");
1642 _bt.mark_block((HeapWord*)curFc, size); | |
1643 splitBirth(size); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1644 fc = curFc; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1645 } else { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1646 // Return entire block to caller |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
1647 fc = newFc; |
0 | 1648 } |
1649 } | |
1650 } | |
1651 } else { | |
1652 // Get a free chunk from the free chunk dictionary to be returned to | |
1653 // replenish the indexed free list. | |
1654 fc = getChunkFromDictionaryExact(size); | |
1655 } | |
1145 | 1656 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
0 | 1657 return fc; |
1658 } | |
1659 | |
1660 FreeChunk* | |
1661 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { | |
1662 assert_locked(); | |
1663 FreeChunk* fc = _dictionary->getChunk(size); | |
1664 if (fc == NULL) { | |
1665 return NULL; | |
1666 } | |
1667 _bt.allocated((HeapWord*)fc, fc->size()); | |
1668 if (fc->size() >= size + MinChunkSize) { | |
1669 fc = splitChunkAndReturnRemainder(fc, size); | |
1670 } | |
1671 assert(fc->size() >= size, "chunk too small"); | |
1672 assert(fc->size() < size + MinChunkSize, "chunk too big"); | |
1673 _bt.verify_single_block((HeapWord*)fc, fc->size()); | |
1674 return fc; | |
1675 } | |
1676 | |
1677 FreeChunk* | |
1678 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { | |
1679 assert_locked(); | |
1680 FreeChunk* fc = _dictionary->getChunk(size); | |
1681 if (fc == NULL) { | |
1682 return fc; | |
1683 } | |
1684 _bt.allocated((HeapWord*)fc, fc->size()); | |
1685 if (fc->size() == size) { | |
1686 _bt.verify_single_block((HeapWord*)fc, size); | |
1687 return fc; | |
1688 } | |
1689 assert(fc->size() > size, "getChunk() guarantee"); | |
1690 if (fc->size() < size + MinChunkSize) { | |
1691 // Return the chunk to the dictionary and go get a bigger one. | |
1692 returnChunkToDictionary(fc); | |
1693 fc = _dictionary->getChunk(size + MinChunkSize); | |
1694 if (fc == NULL) { | |
1695 return NULL; | |
1696 } | |
1697 _bt.allocated((HeapWord*)fc, fc->size()); | |
1698 } | |
1699 assert(fc->size() >= size + MinChunkSize, "tautology"); | |
1700 fc = splitChunkAndReturnRemainder(fc, size); | |
1701 assert(fc->size() == size, "chunk is wrong size"); | |
1702 _bt.verify_single_block((HeapWord*)fc, size); | |
1703 return fc; | |
1704 } | |
1705 | |
1706 void | |
1707 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { | |
1708 assert_locked(); | |
1709 | |
1710 size_t size = chunk->size(); | |
1711 _bt.verify_single_block((HeapWord*)chunk, size); | |
1712 // adjust _unallocated_block downward, as necessary | |
1713 _bt.freed((HeapWord*)chunk, size); | |
1714 _dictionary->returnChunk(chunk); | |
1145 | 1715 #ifndef PRODUCT
1716 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1717 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
1718 }
1719 #endif // PRODUCT
0 | 1720 } |
1721 | |
1722 void | |
1723 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { | |
1724 assert_locked(); | |
1725 size_t size = fc->size(); | |
1726 _bt.verify_single_block((HeapWord*) fc, size); | |
1727 _bt.verify_not_unallocated((HeapWord*) fc, size); | |
1728 if (_adaptive_freelists) { | |
1729 _indexedFreeList[size].returnChunkAtTail(fc); | |
1730 } else { | |
1731 _indexedFreeList[size].returnChunkAtHead(fc); | |
1732 } | |
1145 | 1733 #ifndef PRODUCT
1734 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1735 _indexedFreeList[size].verify_stats();
1736 }
1737 #endif // PRODUCT
0 | 1738 } |
1739 | |
1740 // Add chunk to end of last block -- if it's the largest | |
1741 // block -- and update BOT and census data. We would | |
1742 // of course have preferred to coalesce it with the | |
1743 // last block, but it's currently less expensive to find the | |
1744 // largest block than it is to find the last. | |
1745 void | |
1746 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( | |
1747 HeapWord* chunk, size_t size) { | |
1748 // check that the chunk does lie in this space! | |
1749 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1750 // One of the parallel gc task threads may be here | |
1751 // whilst others are allocating. | |
1752 Mutex* lock = NULL; | |
1753 if (ParallelGCThreads != 0) { | |
1754 lock = &_parDictionaryAllocLock; | |
1755 } | |
1756 FreeChunk* ec; | |
1757 { | |
1758 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1759 ec = dictionary()->findLargestDict(); // get largest block | |
1760 if (ec != NULL && ec->end() == chunk) { | |
1761 // It's a coterminal block - we can coalesce. | |
1762 size_t old_size = ec->size(); | |
1763 coalDeath(old_size); | |
1764 removeChunkFromDictionary(ec); | |
1765 size += old_size; | |
1766 } else { | |
1767 ec = (FreeChunk*)chunk; | |
1768 } | |
1769 } | |
1770 ec->setSize(size); | |
1771 debug_only(ec->mangleFreed(size)); | |
1772 if (size < SmallForDictionary) { | |
1773 lock = _indexedFreeListParLocks[size]; | |
1774 } | |
1775 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); | |
1776 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); | |
1777 // record the birth under the lock since the recording involves | |
1778 // manipulation of the list on which the chunk lives and | |
1779 // if the chunk is allocated and is the last on the list, | |
1780 // the list can go away. | |
1781 coalBirth(size); | |
1782 } | |
1783 | |
1784 void | |
1785 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, | |
1786 size_t size) { | |
1787 // check that the chunk does lie in this space! | |
1788 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); | |
1789 assert_locked(); | |
1790 _bt.verify_single_block(chunk, size); | |
1791 | |
1792 FreeChunk* fc = (FreeChunk*) chunk; | |
1793 fc->setSize(size); | |
1794 debug_only(fc->mangleFreed(size)); | |
1795 if (size < SmallForDictionary) { | |
1796 returnChunkToFreeList(fc); | |
1797 } else { | |
1798 returnChunkToDictionary(fc); | |
1799 } | |
1800 } | |
1801 | |
1802 void | |
1803 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, | |
1804 size_t size, bool coalesced) { | |
1805 assert_locked(); | |
1806 assert(chunk != NULL, "null chunk"); | |
1807 if (coalesced) { | |
1808 // repair BOT | |
1809 _bt.single_block(chunk, size); | |
1810 } | |
1811 addChunkToFreeLists(chunk, size); | |
1812 } | |
1813 | |
1814 // We _must_ find the purported chunk on our free lists; | |
1815 // we assert if we don't. | |
1816 void | |
1817 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { | |
1818 size_t size = fc->size(); | |
1819 assert_locked(); | |
1820 debug_only(verifyFreeLists()); | |
1821 if (size < SmallForDictionary) { | |
1822 removeChunkFromIndexedFreeList(fc); | |
1823 } else { | |
1824 removeChunkFromDictionary(fc); | |
1825 } | |
1826 _bt.verify_single_block((HeapWord*)fc, size); | |
1827 debug_only(verifyFreeLists()); | |
1828 } | |
1829 | |
1830 void | |
1831 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { | |
1832 size_t size = fc->size(); | |
1833 assert_locked(); | |
1834 assert(fc != NULL, "null chunk"); | |
1835 _bt.verify_single_block((HeapWord*)fc, size); | |
1836 _dictionary->removeChunk(fc); | |
1837 // adjust _unallocated_block upward, as necessary | |
1838 _bt.allocated((HeapWord*)fc, size); | |
1839 } | |
1840 | |
1841 void | |
1842 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { | |
1843 assert_locked(); | |
1844 size_t size = fc->size(); | |
1845 _bt.verify_single_block((HeapWord*)fc, size); | |
1846 NOT_PRODUCT( | |
1847 if (FLSVerifyIndexTable) { | |
1848 verifyIndexedFreeList(size); | |
1849 } | |
1850 ) | |
1851 _indexedFreeList[size].removeChunk(fc); | |
1852 NOT_PRODUCT( | |
1853 if (FLSVerifyIndexTable) { | |
1854 verifyIndexedFreeList(size); | |
1855 } | |
1856 ) | |
1857 } | |
1858 | |
1859 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { | |
1860 /* A hint is the next larger size that has a surplus. | |
1861 Start search at a size large enough to guarantee that | |
1862 the excess is >= MIN_CHUNK. */ | |
1863 size_t start = align_object_size(numWords + MinChunkSize); | |
1864 if (start < IndexSetSize) { | |
1865 FreeList* it = _indexedFreeList; | |
1866 size_t hint = _indexedFreeList[start].hint(); | |
1867 while (hint < IndexSetSize) { | |
1868 assert(hint % MinObjAlignment == 0, "hint should be aligned"); | |
1869 FreeList *fl = &_indexedFreeList[hint]; | |
1870 if (fl->surplus() > 0 && fl->head() != NULL) { | |
1871 // Found a list with surplus, reset original hint | |
1872 // and split out a free chunk which is returned. | |
1873 _indexedFreeList[start].set_hint(hint); | |
1874 FreeChunk* res = getFromListGreater(fl, numWords); | |
1875 assert(res == NULL || res->isFree(), | |
1876 "Should be returning a free chunk"); | |
1877 return res; | |
1878 } | |
1879 hint = fl->hint(); /* keep looking */ | |
1880 } | |
1881 /* None found. */ | |
1882 it[start].set_hint(IndexSetSize); | |
1883 } | |
1884 return NULL; | |
1885 } | |
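// A worked sketch of the hint chain above (illustrative numbers only,
// assuming IndexSetStride == 1 and MinChunkSize == 4): for a 10-word request
// the search starts at list 14 (10 + 4, object-aligned). If list 14's hint is
// 17 and list 17 has no surplus, its hint is followed to, say, list 20; if
// list 20 has a surplus and a non-empty head, list 14's hint is reset to 20
// and a 20-word chunk is split into the 10-word result plus a 10-word
// remainder (>= MinChunkSize, as guaranteed by the choice of start).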
1886 | |
1887 /* Requires fl->size >= numWords + MinChunkSize */ | |
1888 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, | |
1889 size_t numWords) { | |
1890 FreeChunk *curr = fl->head(); | |
1891 size_t oldNumWords = curr->size(); | |
1892 assert(numWords >= MinChunkSize, "Word size is too small"); | |
1893 assert(curr != NULL, "List is empty"); | |
1894 assert(oldNumWords >= numWords + MinChunkSize, | |
1895 "Size of chunks in the list is too small"); | |
1896 | |
1897 fl->removeChunk(curr); | |
1898 // recorded indirectly by splitChunkAndReturnRemainder - | |
1899 // smallSplit(oldNumWords, numWords); | |
1900 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); | |
1901 // Does anything have to be done for the remainder in terms of | |
1902 // fixing the card table? | |
1903 assert(new_chunk == NULL || new_chunk->isFree(), | |
1904 "Should be returning a free chunk"); | |
1905 return new_chunk; | |
1906 } | |
1907 | |
1908 FreeChunk* | |
1909 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, | |
1910 size_t new_size) { | |
1911 assert_locked(); | |
1912 size_t size = chunk->size(); | |
1913 assert(size > new_size, "Split from a smaller block?"); | |
1914 assert(is_aligned(chunk), "alignment problem"); | |
1915 assert(size == adjustObjectSize(size), "alignment problem"); | |
1916 size_t rem_size = size - new_size; | |
1917 assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); | |
1918 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); | |
1919 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); | |
1920 assert(is_aligned(ffc), "alignment problem"); | |
1921 ffc->setSize(rem_size); | |
1922 ffc->linkNext(NULL); | |
1923 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. | |
1924 // Above must occur before BOT is updated below. | |
1925 // adjust block offset table | |
1926 OrderAccess::storestore(); | |
1927 assert(chunk->isFree() && ffc->isFree(), "Error"); | |
0 | 1928 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); |
1929 if (rem_size < SmallForDictionary) { | |
1930 bool is_par = (SharedHeap::heap()->n_par_threads() > 0); | |
1931 if (is_par) _indexedFreeListParLocks[rem_size]->lock(); | |
1932 assert(!is_par || | |
1933 (SharedHeap::heap()->n_par_threads() == | |
1934 SharedHeap::heap()->workers()->active_workers()), "Mismatch"); | |
0 | 1935 returnChunkToFreeList(ffc); |
1936 split(size, rem_size); | |
1937 if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); | |
1938 } else { | |
1939 returnChunkToDictionary(ffc); | |
1940 split(size, rem_size); | |
1941 } | |
1942 chunk->setSize(new_size); | |
1943 return chunk; | |
1944 } | |
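// A worked sketch of the split arithmetic above (illustrative numbers only):
// splitting a 24-word chunk at new_size == 16 places the remainder FreeChunk
// 16 words into the original chunk, sized at 24 - 16 = 8 words (>= the
// MinChunkSize asserted above). The census then records one splitDeath(24)
// and splitBirths for 8 and 16 via split(), and the caller gets back the
// original address resized to 16 words.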
1945 | |
1946 void | |
1947 CompactibleFreeListSpace::sweep_completed() { | |
1948 // Now that space is probably plentiful, refill linear | |
1949 // allocation blocks as needed. | |
1950 refillLinearAllocBlocksIfNeeded(); | |
1951 } | |
1952 | |
1953 void | |
1954 CompactibleFreeListSpace::gc_prologue() { | |
1955 assert_locked(); | |
1956 if (PrintFLSStatistics != 0) { | |
1957 gclog_or_tty->print("Before GC:\n"); | |
1958 reportFreeListStatistics(); | |
1959 } | |
1960 refillLinearAllocBlocksIfNeeded(); | |
1961 } | |
1962 | |
1963 void | |
1964 CompactibleFreeListSpace::gc_epilogue() { | |
1965 assert_locked(); | |
1966 if (PrintGCDetails && Verbose && !_adaptive_freelists) { | |
1967 if (_smallLinearAllocBlock._word_size == 0) | |
1968 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); | |
1969 } | |
1970 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
1971 _promoInfo.stopTrackingPromotions(); | |
1972 repairLinearAllocationBlocks(); | |
1973 // Print Space's stats | |
1974 if (PrintFLSStatistics != 0) { | |
1975 gclog_or_tty->print("After GC:\n"); | |
1976 reportFreeListStatistics(); | |
1977 } | |
1978 } | |
1979 | |
1980 // Iteration support, mostly delegated from a CMS generation | |
1981 | |
1982 void CompactibleFreeListSpace::save_marks() { | |
1983 assert(Thread::current()->is_VM_thread(), | |
1984 "Global variable should only be set when single-threaded"); | |
1985 // Mark the "end" of the used space at the time of this call; | |
0 | 1986 // note, however, that promoted objects from this point |
1987 // on are tracked in the _promoInfo below. | |
1988 set_saved_mark_word(unallocated_block()); | |
1989 #ifdef ASSERT | |
1990 // Check the sanity of save_marks() etc. | |
1991 MemRegion ur = used_region(); | |
1992 MemRegion urasm = used_region_at_save_marks(); | |
1993 assert(ur.contains(urasm), | |
1994 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")" | |
1995 " should contain [" PTR_FORMAT "," PTR_FORMAT ")", | |
1996 ur.start(), ur.end(), urasm.start(), urasm.end())); | |
1997 #endif | |
0 | 1998 // inform allocator that promotions should be tracked. |
1999 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); | |
2000 _promoInfo.startTrackingPromotions(); | |
2001 } | |
2002 | |
2003 bool CompactibleFreeListSpace::no_allocs_since_save_marks() { | |
2004 assert(_promoInfo.tracking(), "No preceding save_marks?"); | |
2005 assert(SharedHeap::heap()->n_par_threads() == 0, | |
2006 "Shouldn't be called if using parallel gc."); | |
0 | 2007 return _promoInfo.noPromotions(); |
2008 } | |
2009 | |
2010 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
2011 \ | |
2012 void CompactibleFreeListSpace:: \ | |
2013 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ | |
2014 assert(SharedHeap::heap()->n_par_threads() == 0, \ | |
2015 "Shouldn't be called (yet) during parallel part of gc."); \ | |
2016 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ | |
2017 /* \ | |
2018 * This also restores any displaced headers and removes the elements from \ | |
2019 * the iteration set as they are processed, so that we have a clean slate \ | |
2020 * at the end of the iteration. Note, thus, that if new objects are \ | |
2021 * promoted as a result of the iteration they are iterated over as well. \ | |
2022 */ \ | |
2023 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ | |
2024 } | |
2025 | |
2026 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) | |
2027 | |
2028 | |
2029 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { | |
2030 // ugghh... how would one do this efficiently for a non-contiguous space? | |
2031 guarantee(false, "NYI"); | |
2032 } | |
2033 | |
2034 bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
0 | 2035 return _smallLinearAllocBlock._word_size == 0; |
2036 } | |
2037 | |
2038 void CompactibleFreeListSpace::repairLinearAllocationBlocks() { | |
2039 // Fix up linear allocation blocks to look like free blocks | |
2040 repairLinearAllocBlock(&_smallLinearAllocBlock); | |
2041 } | |
2042 | |
2043 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { | |
2044 assert_locked(); | |
2045 if (blk->_ptr != NULL) { | |
2046 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, | |
2047 "Minimum block size requirement"); | |
2048 FreeChunk* fc = (FreeChunk*)(blk->_ptr); | |
2049 fc->setSize(blk->_word_size); | |
2050 fc->linkPrev(NULL); // mark as free | |
2051 fc->dontCoalesce(); | |
2052 assert(fc->isFree(), "just marked it free"); | |
2053 assert(fc->cantCoalesce(), "just marked it uncoalescable"); | |
2054 } | |
2055 } | |
2056 | |
2057 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { | |
2058 assert_locked(); | |
2059 if (_smallLinearAllocBlock._ptr == NULL) { | |
2060 assert(_smallLinearAllocBlock._word_size == 0, | |
2061 "Size of linAB should be zero if the ptr is NULL"); | |
2062 // Reset the linAB refill and allocation size limit. | |
2063 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); | |
2064 } | |
2065 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); | |
2066 } | |
2067 | |
2068 void | |
2069 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { | |
2070 assert_locked(); | |
2071 assert((blk->_ptr == NULL && blk->_word_size == 0) || | |
2072 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), | |
2073 "blk invariant"); | |
2074 if (blk->_ptr == NULL) { | |
2075 refillLinearAllocBlock(blk); | |
2076 } | |
2077 if (PrintMiscellaneous && Verbose) { | |
2078 if (blk->_word_size == 0) { | |
2079 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); | |
2080 } | |
2081 } | |
2082 } | |
2083 | |
2084 void | |
2085 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { | |
2086 assert_locked(); | |
2087 assert(blk->_word_size == 0 && blk->_ptr == NULL, | |
2088 "linear allocation block should be empty"); | |
2089 FreeChunk* fc; | |
2090 if (blk->_refillSize < SmallForDictionary && | |
2091 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { | |
2092 // A linAB's strategy might be to use small sizes to reduce | |
2093 // fragmentation but still get the benefits of allocation from a | |
2094 // linAB. | |
2095 } else { | |
2096 fc = getChunkFromDictionary(blk->_refillSize); | |
2097 } | |
2098 if (fc != NULL) { | |
2099 blk->_ptr = (HeapWord*)fc; | |
2100 blk->_word_size = fc->size(); | |
2101 fc->dontCoalesce(); // to prevent sweeper from sweeping us up | |
2102 } | |
2103 } | |
2104 | |
2105 // Support for concurrent collection policy decisions. | |
2106 bool CompactibleFreeListSpace::should_concurrent_collect() const { | |
2107 // In the future we might want to add in fragmentation stats -- | |
2108 // including erosion of the "mountain" into this decision as well. | |
2109 return !adaptive_freelists() && linearAllocationWouldFail(); | |
2110 } | |
2111 | |
0 | 2112 // Support for compaction |
2113 | |
2114 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { | |
2115 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); | |
2116 // prepare_for_compaction() uses the space between live objects | |
2117 // so that the later phases can skip dead space quickly. As a result, | |
2118 // verification of the free lists does not work afterwards. | |
2119 } | |
2120 | |
2121 #define obj_size(q) adjustObjectSize(oop(q)->size()) | |
2122 #define adjust_obj_size(s) adjustObjectSize(s) | |
2123 | |
2124 void CompactibleFreeListSpace::adjust_pointers() { | |
2125 // In other versions of adjust_pointers(), a bail out | |
2126 // based on the amount of live data in the generation | |
2127 // (i.e., if 0, bail out) may be used. | |
2128 // Cannot test used() == 0 here because the free lists have already | |
2129 // been mangled by the compaction. | |
2130 | |
2131 SCAN_AND_ADJUST_POINTERS(adjust_obj_size); | |
2132 // See note about verification in prepare_for_compaction(). | |
2133 } | |
2134 | |
2135 void CompactibleFreeListSpace::compact() { | |
2136 SCAN_AND_COMPACT(obj_size); | |
2137 } | |
2138 | |
2139 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] | |
2140 // where fbs is free block sizes | |
2141 double CompactibleFreeListSpace::flsFrag() const { | |
2142 size_t itabFree = totalSizeInIndexedFreeLists(); | |
2143 double frag = 0.0; | |
2144 size_t i; | |
2145 | |
2146 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2147 double sz = i; | |
2148 frag += _indexedFreeList[i].count() * (sz * sz); | |
2149 } | |
2150 | |
2151 double totFree = itabFree + | |
2152 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); | |
2153 if (totFree > 0) { | |
2154 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / | |
2155 (totFree * totFree)); | |
2156 frag = (double)1.0 - frag; | |
2157 } else { | |
2158 assert(frag == 0.0, "Follows from totFree == 0"); | |
2159 } | |
2160 return frag; | |
2161 } | |
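// A minimal standalone sketch (hypothetical helper, not part of this file)
// of the fragmentation metric computed by flsFrag() above, over a plain
// array of free block sizes. For blocks {8, 8, 16}: sum = 32, sum of
// squares = 8*8 + 8*8 + 16*16 = 384, frag = 1.0 - 384/(32*32) = 0.625;
// a single 32-word block instead gives frag = 1.0 - 1024/1024 = 0.0.
static double fls_frag_sketch(const size_t* fbs, size_t n) {
  double sum = 0.0, sum_sq = 0.0;
  for (size_t i = 0; i < n; i++) {
    sum    += (double)fbs[i];                  // total free words
    sum_sq += (double)fbs[i] * (double)fbs[i]; // sum of squared block sizes
  }
  if (sum == 0.0) return 0.0;  // no free space: metric defined as zero
  return 1.0 - sum_sq / (sum * sum);
}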
2162 | |
2163 void CompactibleFreeListSpace::beginSweepFLCensus( | |
2164 float inter_sweep_current, | |
2165 float inter_sweep_estimate, | |
2166 float intra_sweep_estimate) { | |
0 | 2167 assert_locked(); |
2168 size_t i; | |
2169 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2170 FreeList* fl = &_indexedFreeList[i]; | |
2171 if (PrintFLSStatistics > 1) { | |
2172 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i); | |
2173 } | |
2174 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); | |
2175 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); | |
0 | 2176 fl->set_beforeSweep(fl->count()); |
2177 fl->set_bfrSurp(fl->surplus()); | |
2178 } | |
2179 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent, | |
2180 inter_sweep_current, | |
2181 inter_sweep_estimate, | |
2182 intra_sweep_estimate); | |
0 | 2183 } |
2184 | |
2185 void CompactibleFreeListSpace::setFLSurplus() { | |
2186 assert_locked(); | |
2187 size_t i; | |
2188 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2189 FreeList *fl = &_indexedFreeList[i]; | |
2190 fl->set_surplus(fl->count() - | |
2191 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent)); |
0 | 2192 } |
2193 } | |
2194 | |
2195 void CompactibleFreeListSpace::setFLHints() { | |
2196 assert_locked(); | |
2197 size_t i; | |
2198 size_t h = IndexSetSize; | |
2199 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { | |
2200 FreeList *fl = &_indexedFreeList[i]; | |
2201 fl->set_hint(h); | |
2202 if (fl->surplus() > 0) { | |
2203 h = i; | |
2204 } | |
2205 } | |
2206 } | |
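// A worked sketch of the hint assignment above (illustrative numbers only):
// walking down from IndexSetSize - 1, each list's hint becomes the smallest
// strictly larger size that currently has a surplus. If only lists 20 and 40
// have a surplus, lists 20 through 39 get hint 40, lists below 20 get hint
// 20, and lists 40 and above get IndexSetSize, meaning "no larger list to
// try".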
2207 | |
2208 void CompactibleFreeListSpace::clearFLCensus() { | |
2209 assert_locked(); | |
2210 size_t i; |
0 | 2211 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
2212 FreeList *fl = &_indexedFreeList[i]; | |
2213 fl->set_prevSweep(fl->count()); | |
2214 fl->set_coalBirths(0); | |
2215 fl->set_coalDeaths(0); | |
2216 fl->set_splitBirths(0); | |
2217 fl->set_splitDeaths(0); | |
2218 } | |
2219 } | |
2220 | |
2221 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { | |
2222 if (PrintFLSStatistics > 0) { | |
2223 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict(); | |
2224 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT, | |
2225 largestAddr); | |
2226 } | |
0 | 2227 setFLSurplus(); |
2228 setFLHints(); | |
2229 if (PrintGC && PrintFLSCensus > 0) { | |
2230 printFLCensus(sweep_count); |
0 | 2231 } |
2232 clearFLCensus(); | |
2233 assert_locked(); | |
2234 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent); |
0 | 2235 } |
2236 | |
2237 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { | |
2238 if (size < SmallForDictionary) { | |
2239 FreeList *fl = &_indexedFreeList[size]; | |
2240 return (fl->coalDesired() < 0) || | |
2241 ((int)fl->count() > fl->coalDesired()); | |
2242 } else { | |
2243 return dictionary()->coalDictOverPopulated(size); | |
2244 } | |
2245 } | |
2246 | |
2247 void CompactibleFreeListSpace::smallCoalBirth(size_t size) { | |
2248 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2249 FreeList *fl = &_indexedFreeList[size]; | |
2250 fl->increment_coalBirths(); | |
2251 fl->increment_surplus(); | |
2252 } | |
2253 | |
2254 void CompactibleFreeListSpace::smallCoalDeath(size_t size) { | |
2255 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2256 FreeList *fl = &_indexedFreeList[size]; | |
2257 fl->increment_coalDeaths(); | |
2258 fl->decrement_surplus(); | |
2259 } | |
2260 | |
2261 void CompactibleFreeListSpace::coalBirth(size_t size) { | |
2262 if (size < SmallForDictionary) { | |
2263 smallCoalBirth(size); | |
2264 } else { | |
2265 dictionary()->dictCensusUpdate(size, | |
2266 false /* split */, | |
2267 true /* birth */); | |
2268 } | |
2269 } | |
2270 | |
2271 void CompactibleFreeListSpace::coalDeath(size_t size) { | |
2272 if (size < SmallForDictionary) { | |
2273 smallCoalDeath(size); | |
2274 } else { | |
2275 dictionary()->dictCensusUpdate(size, | |
2276 false /* split */, | |
2277 false /* birth */); | |
2278 } | |
2279 } | |
2280 | |
2281 void CompactibleFreeListSpace::smallSplitBirth(size_t size) { | |
2282 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2283 FreeList *fl = &_indexedFreeList[size]; | |
2284 fl->increment_splitBirths(); | |
2285 fl->increment_surplus(); | |
2286 } | |
2287 | |
2288 void CompactibleFreeListSpace::smallSplitDeath(size_t size) { | |
2289 assert(size < SmallForDictionary, "Size too large for indexed list"); | |
2290 FreeList *fl = &_indexedFreeList[size]; | |
2291 fl->increment_splitDeaths(); | |
2292 fl->decrement_surplus(); | |
2293 } | |
2294 | |
2295 void CompactibleFreeListSpace::splitBirth(size_t size) { | |
2296 if (size < SmallForDictionary) { | |
2297 smallSplitBirth(size); | |
2298 } else { | |
2299 dictionary()->dictCensusUpdate(size, | |
2300 true /* split */, | |
2301 true /* birth */); | |
2302 } | |
2303 } | |
2304 | |
2305 void CompactibleFreeListSpace::splitDeath(size_t size) { | |
2306 if (size < SmallForDictionary) { | |
2307 smallSplitDeath(size); | |
2308 } else { | |
2309 dictionary()->dictCensusUpdate(size, | |
2310 true /* split */, | |
2311 false /* birth */); | |
2312 } | |
2313 } | |
2314 | |
2315 void CompactibleFreeListSpace::split(size_t from, size_t to1) { | |
2316 size_t to2 = from - to1; | |
2317 splitDeath(from); | |
2318 splitBirth(to1); | |
2319 splitBirth(to2); | |
2320 } | |
2321 | |
2322 void CompactibleFreeListSpace::print() const { | |
2323 print_on(tty); |
0 | 2324 } |
2325 | |
2326 void CompactibleFreeListSpace::prepare_for_verify() { | |
2327 assert_locked(); | |
2328 repairLinearAllocationBlocks(); | |
2329 // Verify that the SpoolBlocks look like free blocks of | |
2330 // appropriate sizes... To be done ... | |
2331 } | |
2332 | |
2333 class VerifyAllBlksClosure: public BlkClosure { | |
2334 private: |
0 | 2335 const CompactibleFreeListSpace* _sp; |
2336 const MemRegion _span; | |
2337 HeapWord* _last_addr; | |
2338 size_t _last_size; | |
2339 bool _last_was_obj; | |
2340 bool _last_was_live; | |
0 | 2341 |
2342 public: | |
2343 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, | |
2344 MemRegion span) : _sp(sp), _span(span), | |
2345 _last_addr(NULL), _last_size(0), | |
2346 _last_was_obj(false), _last_was_live(false) { } | |
0 | 2347 |
2348 virtual size_t do_blk(HeapWord* addr) { |
0 | 2349 size_t res; |
2350 bool was_obj = false; | |
2351 bool was_live = false; | |
0 | 2352 if (_sp->block_is_obj(addr)) { |
2353 was_obj = true; |
0 | 2354 oop p = oop(addr); |
2355 guarantee(p->is_oop(), "Should be an oop"); | |
2356 res = _sp->adjustObjectSize(p->size()); | |
2357 if (_sp->obj_is_alive(addr)) { | |
2358 was_live = true; |
0 | 2359 p->verify(); |
2360 } | |
2361 } else { | |
2362 FreeChunk* fc = (FreeChunk*)addr; | |
2363 res = fc->size(); | |
2364 if (FLSVerifyLists && !fc->cantCoalesce()) { | |
2365 guarantee(_sp->verifyChunkInFreeLists(fc), | |
2366 "Chunk should be on a free list"); | |
2367 } | |
2368 } | |
2369 if (res == 0) { | |
2370 gclog_or_tty->print_cr("Livelock: no rank reduction!"); | |
2371 gclog_or_tty->print_cr( | |
2372 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" | |
2373 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n", | |
2374 addr, res, was_obj ? "true" : "false", was_live ? "true" : "false", | |
2375 _last_addr, _last_size, _last_was_obj ? "true" : "false", _last_was_live ? "true" : "false"); | |
2376 _sp->print_on(gclog_or_tty); | |
2377 guarantee(false, "Seppuku!"); | |
2378 } | |
2379 _last_addr = addr; | |
2380 _last_size = res; | |
2381 _last_was_obj = was_obj; | |
2382 _last_was_live = was_live; | |
0 | 2383 return res; |
2384 } | |
2385 }; | |
2386 | |
2387 class VerifyAllOopsClosure: public OopClosure { | |
2388 private: |
0 | 2389 const CMSCollector* _collector; |
2390 const CompactibleFreeListSpace* _sp; | |
2391 const MemRegion _span; | |
2392 const bool _past_remark; | |
2393 const CMSBitMap* _bit_map; | |
2394 | |
2395 protected: | |
2396 void do_oop(void* p, oop obj) { | |
2397 if (_span.contains(obj)) { // the interior oop points into CMS heap | |
2398 if (!_span.contains(p)) { // reference from outside CMS heap | |
2399 // Should be a valid object; the first disjunct below allows | |
2400 // us to sidestep an assertion in block_is_obj() that insists | |
2401 // that p be in _sp. Note that several generations (and spaces) | |
2402 // are spanned by _span (CMS heap) above. | |
2403 guarantee(!_sp->is_in_reserved(obj) || | |
2404 _sp->block_is_obj((HeapWord*)obj), | |
2405 "Should be an object"); | |
2406 guarantee(obj->is_oop(), "Should be an oop"); | |
2407 obj->verify(); | |
2408 if (_past_remark) { | |
2409 // Remark has been completed, the object should be marked | |
2410 _bit_map->isMarked((HeapWord*)obj); | |
2411 } | |
2412 } else { // reference within CMS heap | |
2413 if (_past_remark) { | |
2414 // Remark has been completed -- so the referent should have | |
2415 // been marked, if referring object is. | |
2416 if (_bit_map->isMarked(_collector->block_start(p))) { | |
2417 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); | |
2418 } | |
2419 } | |
2420 } | |
2421 } else if (_sp->is_in_reserved(p)) { | |
2422 // the reference is from FLS, and points out of FLS | |
2423 guarantee(obj->is_oop(), "Should be an oop"); | |
2424 obj->verify(); | |
2425 } | |
2426 } | |
2427 | |
2428 template <class T> void do_oop_work(T* p) { | |
2429 T heap_oop = oopDesc::load_heap_oop(p); | |
2430 if (!oopDesc::is_null(heap_oop)) { | |
2431 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
2432 do_oop(p, obj); | |
2433 } | |
2434 } | |
2435 | |
0 | 2436 public: |
2437 VerifyAllOopsClosure(const CMSCollector* collector, | |
2438 const CompactibleFreeListSpace* sp, MemRegion span, | |
2439 bool past_remark, CMSBitMap* bit_map) : | |
2440 OopClosure(), _collector(collector), _sp(sp), _span(span), | |
2441 _past_remark(past_remark), _bit_map(bit_map) { } | |
2442 | |
2443 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } | |
2444 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } | |
0 | 2445 }; |
2446 | |
2447 void CompactibleFreeListSpace::verify(bool ignored) const { | |
2448 assert_lock_strong(&_freelistLock); | |
2449 verify_objects_initialized(); | |
2450 MemRegion span = _collector->_span; | |
2451 bool past_remark = (_collector->abstract_state() == | |
2452 CMSCollector::Sweeping); | |
2453 | |
2454 ResourceMark rm; | |
2455 HandleMark hm; | |
2456 | |
2457 // Check integrity of CFL data structures | |
2458 _promoInfo.verify(); | |
2459 _dictionary->verify(); | |
2460 if (FLSVerifyIndexTable) { | |
2461 verifyIndexedFreeLists(); | |
2462 } | |
2463 // Check integrity of all objects and free blocks in space | |
2464 { | |
2465 VerifyAllBlksClosure cl(this, span); | |
2466 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const | |
2467 } | |
2468 // Check that all references in the heap to FLS | |
2469 // are to valid objects in FLS or that references in | |
2470 // FLS are to valid objects elsewhere in the heap | |
2471 if (FLSVerifyAllHeapReferences) | |
2472 { | |
2473 VerifyAllOopsClosure cl(_collector, this, span, past_remark, | |
2474 _collector->markBitMap()); | |
2475 CollectedHeap* ch = Universe::heap(); | |
2476 ch->oop_iterate(&cl); // all oops in generations | |
2477 ch->permanent_oop_iterate(&cl); // all oops in perm gen | |
2478 } | |
2479 | |
2480 if (VerifyObjectStartArray) { | |
2481 // Verify the block offset table | |
2482 _bt.verify(); | |
2483 } | |
2484 } | |
2485 | |
2486 #ifndef PRODUCT | |
2487 void CompactibleFreeListSpace::verifyFreeLists() const { | |
2488 if (FLSVerifyLists) { | |
2489 _dictionary->verify(); | |
2490 verifyIndexedFreeLists(); | |
2491 } else { | |
2492 if (FLSVerifyDictionary) { | |
2493 _dictionary->verify(); | |
2494 } | |
2495 if (FLSVerifyIndexTable) { | |
2496 verifyIndexedFreeLists(); | |
2497 } | |
2498 } | |
2499 } | |
2500 #endif | |
2501 | |
2502 void CompactibleFreeListSpace::verifyIndexedFreeLists() const { | |
2503 size_t i = 0; | |
2504 for (; i < IndexSetStart; i++) { |
0 | 2505 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); |
2506 } | |
2507 for (; i < IndexSetSize; i++) { | |
2508 verifyIndexedFreeList(i); | |
2509 } | |
2510 } | |
2511 | |
2512 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { | |
2513 FreeChunk* fc = _indexedFreeList[size].head(); | |
2514 FreeChunk* tail = _indexedFreeList[size].tail(); | |
2515 size_t num = _indexedFreeList[size].count(); | |
2516 size_t n = 0; | |
2517 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL, | |
2518 "Slot should have been empty"); | |
2519 for (; fc != NULL; fc = fc->next(), n++) { | |
0 | 2520 guarantee(fc->size() == size, "Size inconsistency"); |
2521 guarantee(fc->isFree(), "!free?"); | |
2522 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); | |
2523 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail"); |
0 | 2524 } |
2525 guarantee(n == num, "Incorrect count"); |
0 | 2526 } |
2527 | |
2528 #ifndef PRODUCT | |
2529 void CompactibleFreeListSpace::check_free_list_consistency() const { |
0 | 2530 assert(_dictionary->minSize() <= IndexSetSize, |
2531 "Some sizes can't be allocated without recourse to" | |
2532 " linear allocation buffers"); | |
2533 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), | |
2534 "else MIN_TREE_CHUNK_SIZE is wrong"); | |
2535 assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit | |
2536 (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit | |
2537 assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0), | |
0 | 2538 "Some for-loops may be incorrectly initialized"); |
2539 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), | |
2540 "For-loops that iterate over IndexSet with stride 2 may be wrong"); | |
2541 } | |
2542 #endif | |
2543 | |
2544 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { |
0 | 2545 assert_lock_strong(&_freelistLock); |
2546 FreeList total; | |
2547 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); | |
2548 FreeList::print_labels_on(gclog_or_tty, "size"); | |
0 | 2549 size_t totalFree = 0; |
2550 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { | |
2551 const FreeList *fl = &_indexedFreeList[i]; | |
2552 totalFree += fl->count() * fl->size(); | |
2553 if (i % (40*IndexSetStride) == 0) { | |
2554 FreeList::print_labels_on(gclog_or_tty, "size"); | |
2555 } | |
2556 fl->print_on(gclog_or_tty); | |
2557 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() ); | |
2558 total.set_surplus( total.surplus() + fl->surplus() ); | |
2559 total.set_desired( total.desired() + fl->desired() ); | |
2560 total.set_prevSweep( total.prevSweep() + fl->prevSweep() ); | |
2561 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep()); | |
2562 total.set_count( total.count() + fl->count() ); | |
2563 total.set_coalBirths( total.coalBirths() + fl->coalBirths() ); | |
2564 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() ); | |
2565 total.set_splitBirths(total.splitBirths() + fl->splitBirths()); | |
2566 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths()); | |
0 | 2567 } |
2568 total.print_on(gclog_or_tty, "TOTAL"); | |
2569 gclog_or_tty->print_cr("Total free in indexed lists " | |
2570 SIZE_FORMAT " words", totalFree); | |
0 | 2571 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", |
2572 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/ | |
2573 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0), | |
2574 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); | |
0 | 2575 _dictionary->printDictCensus(); |
2576 } | |
2577 | |
2578 /////////////////////////////////////////////////////////////////////////// | |
2579 // CFLS_LAB | |
2580 /////////////////////////////////////////////////////////////////////////// | |
2581 | |
2582 #define VECTOR_257(x) \ | |
2583 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \ | |
2584 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2585 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2586 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2587 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2588 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2589 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2590 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2591 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ | |
2592 x } | |
2593 | |
2594 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_ | |
2595 // OldPLABSize, whose static default is different; if overridden at the | |
2596 // command-line, this will get reinitialized via a call to | |
2597 // modify_initialization() below. | |
2598 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] = | |
2599 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim)); | |
2600 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0); | |
2601 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0); | |
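// Note: VECTOR_257 expands to a 257-element initializer -- eight rows of
// 32 entries plus the final one -- matching IndexSetSize == 257, which the
// CFLS_LAB constructor below asserts.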
0 | 2602 |
2603 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) : | |
2604 _cfls(cfls) | |
2605 { | |
2606 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above"); |
0 | 2607 for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
2608 i < CompactibleFreeListSpace::IndexSetSize; | |
2609 i += CompactibleFreeListSpace::IndexSetStride) { | |
2610 _indexedFreeList[i].set_size(i); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2611 _num_blocks[i] = 0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2612 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
579
diff
changeset
|
2613 } |

static bool _CFLS_LAB_modified = false;

void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}

HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}

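// Worked example of the two allocation paths above (illustrative, not in the
// original source): with IndexSetSize == 257, a request of word_sz == 100 is
// served from the thread-local _indexedFreeList[100], refilled in bulk from
// the shared pool when empty, while a request of word_sz == 300 takes the
// parDictionaryAllocLock and carves an exactly-sized chunk out of the shared
// dictionary.
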
// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
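
// Worked example of the resize-quicker heuristic above (illustrative numbers,
// not from the original source): suppose this thread has already pulled
// _num_blocks[word_sz] == 1200 blocks this scavenge, the smoothed claim is
// n_blks == 25, and CMSOldPLABToleranceFactor == 4 with
// CMSOldPLABNumRefills == 4. Then multiple == 1200/(4*4*25) == 3, and with
// CMSOldPLABReactivityFactor == 2 the claim grows by 2*3*25 == 150 blocks to
// n_blks == 175, subject to the CMSOldPLABMax clamp; demand inside the
// tolerance band (multiple == 0) leaves n_blks unchanged.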

void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}
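
// Worked example (illustrative, not in the original source): if 8 workers
// together consumed _global_num_blocks[i] == 640 blocks of size i during the
// last scavenge and CMSOldPLABNumRefills == 4, the per-refill demand is
// 640/(8*4) == 20 blocks; that value, clamped to [CMSOldPLABMin,
// CMSOldPLABMax], is the sample fed to _blocks_to_claim[i], so subsequent
// refills will claim about 20 blocks of this size at a time.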

// If this is changed in the future to allow parallel
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire = _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
        //                 Mutex::_no_safepoint_check_flag);

        // Update globals stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = FreeList();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i] = 0;
    }
  }
}
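
// Illustrative trace of the retire bookkeeping above (not in the original
// source): if this worker obtained _num_blocks[i] == 100 blocks of size i
// during the scavenge and retires with 15 still unused on its local list,
// then 100 - 15 == 85 blocks are recorded as consumed in
// _global_num_blocks[i], the 15 leftovers are prepended back onto the shared
// _indexedFreeList[i], and the local list is reset for the next round.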

void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
  {
    bool found;
    int  k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        FreeList* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->splitDeaths() +
                             fl_for_cur_sz.count();
            gfl->set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->isFree(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                     ((fc == ffc) && ffc->isFree() &&
                      (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                     "Counting error");
              ffc->setSize(word_sz);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->linkNext(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                       _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->isFree(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->linkNext(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->isFree(), "Error");
      fc->setSize(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->linkNext(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == i * word_sz after the decrement below
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->setSize(word_sz);
  fc->linkPrev(NULL);  // idempotent wrt free-ness, see assert above
  fc->linkNext(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
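
// Illustrative picture of the right-to-left carving used above (not in the
// original source): splitting one free chunk fc of size 4*word_sz into four
// blocks touches fc + 3*word_sz, then fc + 2*word_sz, then fc + word_sz, and
// finally fc itself. Until that last step, a concurrent block-offset-table
// walker that lands in the unsplit prefix still sees one well-formed free
// block, which is why each iteration first publishes the new block header
// (setSize/linkPrev/linkNext) and then issues OrderAccess::storestore()
// before the BOT update that shrinks the prefix.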

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
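
// Worked example of the task count above (illustrative, not in the original
// source): with a used region of 10,000,000 words and a task_size of 786,432
// words, the ceiling division (10000000 + 786431)/786432 yields n_tasks == 13;
// twelve full strides cover 9,437,184 words and the thirteenth covers the
// partial tail, which is precisely the property the two asserts check.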

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                                         CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
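
// Worked example of the span clipping above (illustrative, not in the
// original source): if the generation's reserved span is
// [0x18000000, 0x20000000) and low == 0x18000123, then assuming a 512-byte
// card, low is aligned down to the card boundary 0x18000000 and the marking
// tasks cover [0x18000000, 0x20000000); a low beyond span.end() produces an
// empty region and therefore zero tasks.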