/*
 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_compactibleFreeListSpace.cpp.incl"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, true);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  splitBirth(mr.word_size());
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
                               1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.
  if (ParallelGCThreads > 0) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}
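// Note that the initializer list above post-decrements the static _lockRank,
// so the first CompactibleFreeListSpace constructed gets a free list lock of
// rank Mutex::leaf + 3 and the next gets Mutex::leaf + 2; this is what ties
// the serial-construction requirement to the [_leaf+2, _leaf+3] range
// mentioned in the constructor comments.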

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
         "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this", so adjust the size again).
      // Use the virtual method, which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  debug_only(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold(). A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block. The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used. It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
           "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
             "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
                                 1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
             "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
             "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(value >= unallocated_block(), "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}
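// Note on set_end() above: when the space grows (value > prevEnd), the newly
// exposed region [prevEnd, value) is either handed wholesale to the small
// linear allocation block (the non-adaptive case with an empty linAB) or
// added to the free lists via addChunkToFreeListsAtEndRecordingStats(), which
// also coalesces with a preceding free block and updates the BOT and census.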

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
  if (SharedHeap::heap()->n_par_threads() > 0) {                                \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}
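// Note on the macro above: the _par and _nopar walkers are identical except
// that the parallel variant uses block_size()/block_is_obj(), which tolerate
// concurrent (parallel) mutation of free chunks, while the _nopar variant
// uses the cheaper block_size_nopar()/block_is_obj_nopar() queries defined
// later in this file.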

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the space \intersect memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked();
  NOT_PRODUCT(verify_objects_initialized());
  Space::object_iterate_mem(mr, cl);
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *addr, *last;
  size_t size;
  for (addr = bottom(), last = end();
       addr < last; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful(oop(addr));
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
                                                   ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  oop o = (oop)p;
  volatile oop* second_word_addr = o->klass_addr();
  while (true) {
    klassOop k = (klassOop)(*second_word_addr);
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
      FreeChunk* fc = (FreeChunk*)p;
      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
      size_t res = (*sz_addr);
      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
      if (k == k2) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else if (k != NULL) {
      assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
      assert(o->is_parsable(), "Should be parsable");
      assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
      size_t res = o->size_given_klass(k->klass_part());
      res = adjustObjectSize(res);
      assert(res != 0, "Block size should not be 0");
      return res;
    }
  }
}
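// Note on block_size() above: the loop implements a simple lock-free
// read-confirm protocol. The second word of a block either looks like a
// free-chunk marker or like a klass pointer. If it looks free, the size is
// read and the second word is then re-read; only if the two reads of the
// second word agree is the size trusted, otherwise the block may have been
// concurrently allocated and the loop retries. If it looks like a klass,
// the object is (asserted to be) parsable and its size is computed from the
// klass.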

// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  oop o = (oop)p;
  volatile oop* second_word_addr = o->klass_addr();
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    klassOop k = (klassOop)(*second_word_addr);
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
      FreeChunk* fc = (FreeChunk*)p;
      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
      size_t res = (*sz_addr);
      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
      if (k == k2) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
    } else if (k != NULL && o->is_parsable()) {
      assert(k->is_oop(), "Should really be klass oop.");
      assert(o->is_oop(), "Should be an oop");
      size_t res = o->size_given_klass(k->klass_part());
      res = adjustObjectSize(res);
      assert(res != 0, "Block size should not be 0");
      return res;
    } else {
      return c->block_size_if_printezis_bits(p);
    }
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}

size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  FreeChunk* fc = (FreeChunk*)p;
  if (fc->isFree()) {
    return fc->size();
  } else {
    // Ignore mark word because this may be a recently promoted
    // object whose mark word is used to chain together grey
    // objects (the last one would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return adjustObjectSize(oop(p)->size());
  }
}

// This implementation assumes that the property of "being an object" is
// stable. But being a free chunk may not be (because of parallel
// promotion.)
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  // When doing a mark-sweep-compact of the CMS generation, this
  // assertion may fail because prepare_for_compaction() uses
  // space that is garbage to maintain information on ranges of
  // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., that the block start calculation may look at objects
  // at address below "p" in finding the object that contains "p"
  // and those objects (if garbage) may have been modified to hold
  // live range information).
  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  klassOop k = oop(p)->klass();
  intptr_t ki = (intptr_t)k;
  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
  if (k != NULL) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  } else {
    return false; // Was not an object at the start of collection.
  }
}

// Check if the object is alive. This fact is checked either by consulting
// the main marking bitmap in the sweeping phase or, if it's a permanent
// generation and we're not in the sweeping phase, by checking the
// perm_gen_verify_bit_map where we store the "deadness" information if
// we did not sweep the perm gen in the most recent previous GC cycle.
bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  assert (block_is_obj(p), "The address should point to an object");

  // If we're sweeping, we use object liveness information from the main bit map
  // for both perm gen and old gen.
  // We don't need to lock the bitmap (live_map or dead_map below), because
  // EITHER we are in the middle of the sweeping phase, and the
  // main marking bit map (live_map below) is locked,
  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  // is stable, because it's mutated only in the sweeping phase.
  if (_collector->abstract_state() == CMSCollector::Sweeping) {
    CMSBitMap* live_map = _collector->markBitMap();
    return live_map->isMarked((HeapWord*) p);
  } else {
    // If we're not currently sweeping and we haven't swept the perm gen in
    // the previous concurrent cycle then we may have dead but unswept objects
    // in the perm gen. In this case, we use the "deadness" information
    // that we had saved in perm_gen_verify_bit_map at the last sweep.
    if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
      if (_collector->verifying()) {
        CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
        // Object is marked in the dead_map bitmap at the previous sweep
        // when we know that it's dead; if the bitmap is not allocated then
        // the object is alive.
        return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
               || !dead_map->par_isMarked((HeapWord*) p);
      } else {
        return false; // We can't say for sure if it's live, so we say that it's dead.
      }
    }
  }
  return true;
}

bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  assert(_bt.block_start(p) == p, "Should be a block boundary");
  if (!fc->isFree()) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  }
  return false;
}

// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistlock when you call this.
size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  size_t size = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      // We may be calling here without the lock in which case we
      // won't do this modest sanity check.
      if (freelistLock()->owned_by_self()) {
        size_t total_list_size = 0;
        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
             fc = fc->next()) {
          total_list_size += i;
        }
        assert(total_list_size == i * _indexedFreeList[i].count(),
               "Count in list is incorrect");
      }
    )
    size += i * _indexedFreeList[i].count();
  }
  return size;
}

HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}

HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  if (_adaptive_freelists) {
    res = allocate_adaptive_freelists(size);
  } else { // non-adaptive free lists
    res = allocate_non_adaptive_freelists(size);
  }

  if (res != NULL) {
    // check that res does lie in this space!
    assert(is_in_reserved(res), "Not in this space!");
    assert(is_aligned((void*)res), "alignment check");

    FreeChunk* fc = (FreeChunk*)res;
    fc->markNotFree();
    assert(!fc->isFree(), "shouldn't be marked free");
    assert(oop(fc)->klass() == NULL, "should look uninitialized");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block(res, size);
    _bt.verify_not_unallocated(res, size);
    // mangle a just allocated object with a distinct pattern.
    debug_only(fc->mangleAllocated(size));
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  HeapWord* res = NULL;
  // try and use linear allocation for smaller blocks
  if (size < _smallLinearAllocBlock._allocation_size_limit) {
    // if successful, the following also adjusts block offset table
    res = getChunkFromSmallLinearAllocBlock(size);
  }
  // Else triage to indexed lists for smaller sizes
  if (res == NULL) {
    if (size < SmallForDictionary) {
      res = (HeapWord*) getChunkFromIndexedFreeList(size);
    } else {
      // else get it from the big dictionary; if even this doesn't
      // work we are out of luck.
      res = (HeapWord*)getChunkFromDictionaryExact(size);
    }
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  // Strategy
  //   if small
  //     exact size from small object indexed list if small
  //     small or large linear allocation block (linAB) as appropriate
  //     take from lists of greater sized chunks
  //   else
  //     dictionary
  //     small or large linear allocation block if it has the space
  // Try allocating exact size from indexTable first
  if (size < IndexSetSize) {
    res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
      assert(res != (HeapWord*)_indexedFreeList[size].head(),
             "Not removed from free list");
      // no block offset table adjustment is necessary on blocks in
      // the indexed lists.

    // Try allocating from the small LinAB
    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
               (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
      // if successful, the above also adjusts block offset table
      // Note that this call will refill the LinAB to
      // satisfy the request. This is different from
      // evm.
      // Don't record chunk off a LinAB? smallSplitBirth(size);

    } else {
      // Raid the exact free lists larger than size, even if they are not
      // overpopulated.
      res = (HeapWord*) getChunkFromGreater(size);
    }
  } else {
    // Big objects get allocated directly from the dictionary.
    res = (HeapWord*) getChunkFromDictionaryExact(size);
    if (res == NULL) {
      // Try hard not to fail since an allocation failure will likely
      // trigger a synchronous GC. Try to get the space from the
      // allocation blocks.
      res = getChunkFromSmallLinearAllocBlockRemainder(size);
    }
  }

  return res;
}

// A worst-case estimate of the space required (in HeapWords) to expand the heap
// when promoting obj.
size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  // Depending on the object size, expansion may require refilling either a
  // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
  // is added because the dictionary may over-allocate to avoid fragmentation.
  size_t space = obj_size;
  if (!_adaptive_freelists) {
    space = MAX2(space, _smallLinearAllocBlock._refillSize);
  }
  space += _promoInfo.refillSize() + 2 * MinChunkSize;
  return space;
}

FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  FreeChunk* ret;

  assert(numWords >= MinChunkSize, "Size is less than minimum");
  assert(linearAllocationWouldFail() || bestFitFirst(),
         "Should not be here");

  size_t i;
  size_t currSize = numWords + MinChunkSize;
  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    FreeList* fl = &_indexedFreeList[i];
    if (fl->head()) {
      ret = getFromListGreater(fl, numWords);
      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
      return ret;
    }
  }

  currSize = MAX2((size_t)SmallForDictionary,
                  (size_t)(numWords + MinChunkSize));

  /* Try to get a chunk that satisfies request, while avoiding
     fragmentation that can't be handled. */
  {
    ret = dictionary()->getChunk(currSize);
    if (ret != NULL) {
      assert(ret->size() - numWords >= MinChunkSize,
             "Chunk is too small");
      _bt.allocated((HeapWord*)ret, ret->size());
      /* Carve returned chunk. */
      (void) splitChunkAndReturnRemainder(ret, numWords);
      /* Label this as no longer a free chunk. */
      assert(ret->isFree(), "This chunk should be free");
      ret->linkPrev(NULL);
    }
    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
    return ret;
  }
  ShouldNotReachHere();
}

bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  const {
  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
}

bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  if (fc->size() >= IndexSetSize) {
    return dictionary()->verifyChunkInFreeLists(fc);
  } else {
    return verifyChunkInIndexedFreeLists(fc);
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::assert_locked() const {
  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}
#endif

FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
  assert_locked();
  FreeChunk* fc;
  {
    // If GC is parallel, this might be called by several threads.
    // This should be rare enough that the locking overhead won't affect
    // the sequential code.
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    fc = getChunkFromDictionary(size);
  }
  if (fc != NULL) {
    fc->dontCoalesce();
    assert(fc->isFree(), "Should be free, but not coalescable");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  }
  return fc;
}

oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  assert_locked();

  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
    return NULL;
  }
  // Call the allocate(size_t, bool) form directly to avoid the
  // additional call through the allocate(size_t) form. Having
  // the compiler inline the call is problematic because allocate(size_t)
  // is a virtual method.
  HeapWord* res = allocate(adjustObjectSize(obj_size));
  if (res != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
    // if we should be tracking promotions, do so.
    if (_promoInfo.tracking()) {
      _promoInfo.track((PromotedObject*)res);
    }
  }
  return oop(res);
}
1241
|
|
1242 HeapWord*
|
|
1243 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
|
|
1244 assert_locked();
|
|
1245 assert(size >= MinChunkSize, "minimum chunk size");
|
|
1246 assert(size < _smallLinearAllocBlock._allocation_size_limit,
|
|
1247 "maximum from smallLinearAllocBlock");
|
|
1248 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
|
|
1249 }
|
|
1250
|
|
1251 HeapWord*
|
|
1252 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
|
|
1253 size_t size) {
|
|
1254 assert_locked();
|
|
1255 assert(size >= MinChunkSize, "too small");
|
|
1256 HeapWord* res = NULL;
|
|
1257 // Try to do linear allocation from blk, making sure that
|
|
1258 if (blk->_word_size == 0) {
|
|
1259 // We have probably been unable to fill this either in the prologue or
|
|
1260 // when it was exhausted at the last linear allocation. Bail out until
|
|
1261 // next time.
|
|
1262 assert(blk->_ptr == NULL, "consistency check");
|
|
1263 return NULL;
|
|
1264 }
|
|
1265 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
|
|
1266 res = getChunkFromLinearAllocBlockRemainder(blk, size);
|
|
1267 if (res != NULL) return res;
|
|
1268
|
|
1269 // about to exhaust this linear allocation block
|
|
1270 if (blk->_word_size == size) { // exactly satisfied
|
|
1271 res = blk->_ptr;
|
|
1272 _bt.allocated(res, blk->_word_size);
|
|
1273 } else if (size + MinChunkSize <= blk->_refillSize) {
|
|
1274 // Update _unallocated_block if the size is such that chunk would be
|
|
1275 // returned to the indexed free list. All other chunks in the indexed
|
|
1276 // free lists are allocated from the dictionary so that _unallocated_block
|
|
1277 // has already been adjusted for them. Do it here so that the cost
|
|
1278 // for all chunks added back to the indexed free lists.
|
|
1279 if (blk->_word_size < SmallForDictionary) {
|
|
1280 _bt.allocated(blk->_ptr, blk->_word_size);
|
|
1281 }
|
|
1282 // Return the chunk that isn't big enough, and then refill below.
|
|
1283 addChunkToFreeLists(blk->_ptr, blk->_word_size);
|
|
1284 _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
|
|
1285 // Don't keep statistics on adding back chunk from a LinAB.
|
|
1286 } else {
|
|
1287 // A refilled block would not satisfy the request.
|
|
1288 return NULL;
|
|
1289 }
|
|
1290
|
|
1291 blk->_ptr = NULL; blk->_word_size = 0;
|
|
1292 refillLinearAllocBlock(blk);
|
|
1293 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
|
|
1294 "block was replenished");
|
|
1295 if (res != NULL) {
|
|
1296 splitBirth(size);
|
|
1297 repairLinearAllocBlock(blk);
|
|
1298 } else if (blk->_ptr != NULL) {
|
|
1299 res = blk->_ptr;
|
|
1300 size_t blk_size = blk->_word_size;
|
|
1301 blk->_word_size -= size;
|
|
1302 blk->_ptr += size;
|
|
1303 splitBirth(size);
|
|
1304 repairLinearAllocBlock(blk);
|
|
1305 // Update BOT last so that other (parallel) GC threads see a consistent
|
|
1306 // view of the BOT and free blocks.
|
|
1307 // Above must occur before BOT is updated below.
|
|
1308 _bt.split_block(res, blk_size, size); // adjust block offset table
|
|
1309 }
|
|
1310 return res;
|
|
1311 }
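// Cases handled above: (1) the block exactly satisfies the request and is
// consumed whole; (2) the request plus MinChunkSize would fit in a refilled
// block, so the too-small remainder is returned to the free lists first;
// (3) otherwise NULL is returned without touching the block. In cases (1)
// and (2) the block is then refilled; if the request was not already
// satisfied, it is carved from the fresh block, with the BOT updated last so
// that parallel readers see a consistent view.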
|
|
1312
|
|
1313 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
|
|
1314 LinearAllocBlock* blk,
|
|
1315 size_t size) {
|
|
1316 assert_locked();
|
|
1317 assert(size >= MinChunkSize, "too small");
|
|
1318
|
|
1319 HeapWord* res = NULL;
|
|
1320 // This is the common case. Keep it simple.
|
|
1321 if (blk->_word_size >= size + MinChunkSize) {
|
|
1322 assert(blk->_ptr != NULL, "consistency check");
|
|
1323 res = blk->_ptr;
|
|
1324 // Note that the BOT is up-to-date for the linAB before allocation. It
|
|
1325 // indicates the start of the linAB. The split_block() updates the
|
|
1326 // BOT for the linAB after the allocation (indicates the start of the
|
|
1327 // next chunk to be allocated).
|
|
1328 size_t blk_size = blk->_word_size;
|
|
1329 blk->_word_size -= size;
|
|
1330 blk->_ptr += size;
|
|
1331 splitBirth(size);
|
|
1332 repairLinearAllocBlock(blk);
|
|
1333 // Update BOT last so that other (parallel) GC threads see a consistent
|
|
1334 // view of the BOT and free blocks.
|
|
1335 // Above must occur before BOT is updated below.
|
|
1336 _bt.split_block(res, blk_size, size); // adjust block offset table
|
|
1337 _bt.allocated(res, size);
|
|
1338 }
|
|
1339 return res;
|
|
1340 }
|
|
1341
|
|
1342 FreeChunk*
|
|
1343 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
|
|
1344 assert_locked();
|
|
1345 assert(size < SmallForDictionary, "just checking");
|
|
1346 FreeChunk* res;
|
|
1347 res = _indexedFreeList[size].getChunkAtHead();
|
|
1348 if (res == NULL) {
|
|
1349 res = getChunkFromIndexedFreeListHelper(size);
|
|
1350 }
|
|
1351 _bt.verify_not_unallocated((HeapWord*) res, size);
|
|
1352 return res;
|
|
1353 }
|
|
1354
|
|
1355 FreeChunk*
|
|
1356 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
|
|
1357 assert_locked();
|
|
1358 FreeChunk* fc = NULL;
|
|
1359 if (size < SmallForDictionary) {
|
|
1360 assert(_indexedFreeList[size].head() == NULL ||
|
|
1361 _indexedFreeList[size].surplus() <= 0,
|
|
1362 "List for this size should be empty or under populated");
|
|
1363 // Try best fit in exact lists before replenishing the list
|
|
1364 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
|
|
1365 // Replenish list.
|
|
1366 //
|
|
1367 // Things tried that failed.
|
|
1368 // Tried allocating out of the two LinAB's first before
|
|
1369 // replenishing lists.
|
|
1370 // Tried small linAB of size 256 (size in indexed list)
|
|
1371 // and replenishing indexed lists from the small linAB.
|
|
1372 //
|
|
1373 FreeChunk* newFc = NULL;
|
|
1374 size_t replenish_size = CMSIndexedFreeListReplenish * size;
|
|
1375 if (replenish_size < SmallForDictionary) {
|
|
1376 // Do not replenish from an underpopulated size.
|
|
1377 if (_indexedFreeList[replenish_size].surplus() > 0 &&
|
|
1378 _indexedFreeList[replenish_size].head() != NULL) {
|
|
1379 newFc =
|
|
1380 _indexedFreeList[replenish_size].getChunkAtHead();
|
|
1381 } else {
|
|
1382 newFc = bestFitSmall(replenish_size);
|
|
1383 }
|
|
1384 }
|
|
1385 if (newFc != NULL) {
|
|
1386 splitDeath(replenish_size);
|
|
1387 } else if (replenish_size > size) {
|
|
1388 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
|
|
1389 newFc =
|
|
1390 getChunkFromIndexedFreeListHelper(replenish_size);
|
|
1391 }
|
|
1392 if (newFc != NULL) {
|
|
1393 assert(newFc->size() == replenish_size, "Got wrong size");
|
|
1394 size_t i;
|
|
1395 FreeChunk *curFc, *nextFc;
|
|
1396 // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
|
|
1397 // The last chunk is not added to the lists but is returned as the
|
|
1398 // free chunk.
|
|
1399 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
|
|
1400 i = 0;
|
|
1401 i < (CMSIndexedFreeListReplenish - 1);
|
|
1402 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
|
|
1403 i++) {
|
|
1404 curFc->setSize(size);
|
|
1405 // Don't record this as a return in order to try and
|
|
1406 // determine the "returns" from a GC.
|
|
1407 _bt.verify_not_unallocated((HeapWord*) curFc, size);
|
|
1408 _indexedFreeList[size].returnChunkAtTail(curFc, false);
|
|
1409 _bt.mark_block((HeapWord*)curFc, size);
|
|
1410 splitBirth(size);
|
|
1411 // Don't record the initial population of the indexed list
|
|
1412 // as a split birth.
|
|
1413 }
|
|
1414
|
|
1415 // check that the arithmetic was OK above
|
|
1416 assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
|
|
1417 "inconsistency in carving newFc");
|
|
1418 curFc->setSize(size);
|
|
1419 _bt.mark_block((HeapWord*)curFc, size);
|
|
1420 splitBirth(size);
|
|
1421 return curFc;
|
|
1422 }
|
|
1423 }
|
|
1424 } else {
|
|
1425 // Get a free chunk from the free chunk dictionary to be returned to
|
|
1426 // replenish the indexed free list.
|
|
1427 fc = getChunkFromDictionaryExact(size);
|
|
1428 }
|
|
1429 assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
|
|
1430 return fc;
|
|
1431 }
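// Worked example of the replenishment above (hypothetical numbers): with
// CMSIndexedFreeListReplenish == 4 and size == 8 words, a 32-word chunk is
// obtained and carved into four 8-word chunks; the first three are appended
// to _indexedFreeList[8] and the fourth is handed back to the caller.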
|
|
1432
|
|
1433 FreeChunk*
|
|
1434 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
|
|
1435 assert_locked();
|
|
1436 FreeChunk* fc = _dictionary->getChunk(size);
|
|
1437 if (fc == NULL) {
|
|
1438 return NULL;
|
|
1439 }
|
|
1440 _bt.allocated((HeapWord*)fc, fc->size());
|
|
1441 if (fc->size() >= size + MinChunkSize) {
|
|
1442 fc = splitChunkAndReturnRemainder(fc, size);
|
|
1443 }
|
|
1444 assert(fc->size() >= size, "chunk too small");
|
|
1445 assert(fc->size() < size + MinChunkSize, "chunk too big");
|
|
1446 _bt.verify_single_block((HeapWord*)fc, fc->size());
|
|
1447 return fc;
|
|
1448 }
|
|
1449
|
|
1450 FreeChunk*
|
|
1451 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
|
|
1452 assert_locked();
|
|
1453 FreeChunk* fc = _dictionary->getChunk(size);
|
|
1454 if (fc == NULL) {
|
|
1455 return fc;
|
|
1456 }
|
|
1457 _bt.allocated((HeapWord*)fc, fc->size());
|
|
1458 if (fc->size() == size) {
|
|
1459 _bt.verify_single_block((HeapWord*)fc, size);
|
|
1460 return fc;
|
|
1461 }
|
|
1462 assert(fc->size() > size, "getChunk() guarantee");
|
|
1463 if (fc->size() < size + MinChunkSize) {
|
|
1464 // Return the chunk to the dictionary and go get a bigger one.
|
|
1465 returnChunkToDictionary(fc);
|
|
1466 fc = _dictionary->getChunk(size + MinChunkSize);
|
|
1467 if (fc == NULL) {
|
|
1468 return NULL;
|
|
1469 }
|
|
1470 _bt.allocated((HeapWord*)fc, fc->size());
|
|
1471 }
|
|
1472 assert(fc->size() >= size + MinChunkSize, "tautology");
|
|
1473 fc = splitChunkAndReturnRemainder(fc, size);
|
|
1474 assert(fc->size() == size, "chunk is wrong size");
|
|
1475 _bt.verify_single_block((HeapWord*)fc, size);
|
|
1476 return fc;
|
|
1477 }
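// Note on the retry above: if the dictionary returns a chunk larger than the
// request by less than MinChunkSize, it cannot be split (the remainder would
// be below the minimum chunk size), so it is returned and a chunk of at
// least size + MinChunkSize is requested instead, which guarantees a
// splittable remainder.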
|
|
1478
|
|
1479 void
|
|
1480 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
|
|
1481 assert_locked();
|
|
1482
|
|
1483 size_t size = chunk->size();
|
|
1484 _bt.verify_single_block((HeapWord*)chunk, size);
|
|
1485 // adjust _unallocated_block downward, as necessary
|
|
1486 _bt.freed((HeapWord*)chunk, size);
|
|
1487 _dictionary->returnChunk(chunk);
|
|
1488 }
|
|
1489
|
|
1490 void
|
|
1491 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
|
|
1492 assert_locked();
|
|
1493 size_t size = fc->size();
|
|
1494 _bt.verify_single_block((HeapWord*) fc, size);
|
|
1495 _bt.verify_not_unallocated((HeapWord*) fc, size);
|
|
1496 if (_adaptive_freelists) {
|
|
1497 _indexedFreeList[size].returnChunkAtTail(fc);
|
|
1498 } else {
|
|
1499 _indexedFreeList[size].returnChunkAtHead(fc);
|
|
1500 }
|
|
1501 }
|
|
1502
|
|
1503 // Add the chunk at the end of the space, coalescing it with the largest
1504 // block if that block is coterminal with the chunk, and update the BOT
1505 // and census data. We would of course have preferred to coalesce with
1506 // the last block, but it is currently less expensive to find the
1507 // largest block than it is to find the last.
|
|
1508 void
|
|
1509 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
|
|
1510 HeapWord* chunk, size_t size) {
|
|
1511 // check that the chunk does lie in this space!
|
|
1512 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
|
|
1513 assert_locked();
|
|
1514 // One of the parallel gc task threads may be here
|
|
1515 // whilst others are allocating.
|
|
1516 Mutex* lock = NULL;
|
|
1517 if (ParallelGCThreads != 0) {
|
|
1518 lock = &_parDictionaryAllocLock;
|
|
1519 }
|
|
1520 FreeChunk* ec;
|
|
1521 {
|
|
1522 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
|
|
1523 ec = dictionary()->findLargestDict(); // get largest block
|
|
1524 if (ec != NULL && ec->end() == chunk) {
|
|
1525 // It's a coterminal block - we can coalesce.
|
|
1526 size_t old_size = ec->size();
|
|
1527 coalDeath(old_size);
|
|
1528 removeChunkFromDictionary(ec);
|
|
1529 size += old_size;
|
|
1530 } else {
|
|
1531 ec = (FreeChunk*)chunk;
|
|
1532 }
|
|
1533 }
|
|
1534 ec->setSize(size);
|
|
1535 debug_only(ec->mangleFreed(size));
|
|
1536 if (size < SmallForDictionary) {
|
|
1537 lock = _indexedFreeListParLocks[size];
|
|
1538 }
|
|
1539 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
|
|
1540 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
|
|
1541 // record the birth under the lock since the recording involves
|
|
1542 // manipulation of the list on which the chunk lives and
|
|
1543 // if the chunk is allocated and is the last on the list,
|
|
1544 // the list can go away.
|
|
1545 coalBirth(size);
|
|
1546 }
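// Locking note for the method above: the search for (and possible coalescing
// with) the largest dictionary block happens under _parDictionaryAllocLock
// when parallel GC threads may be allocating, and the subsequent insertion
// and census update happen under the lock protecting the destination list
// (a per-size indexed-list lock for small sizes, the dictionary allocation
// lock otherwise), per the comment above about recording the birth under
// the lock.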
|
|
1547
|
|
1548 void
|
|
1549 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
|
|
1550 size_t size) {
|
|
1551 // check that the chunk does lie in this space!
|
|
1552 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
|
|
1553 assert_locked();
|
|
1554 _bt.verify_single_block(chunk, size);
|
|
1555
|
|
1556 FreeChunk* fc = (FreeChunk*) chunk;
|
|
1557 fc->setSize(size);
|
|
1558 debug_only(fc->mangleFreed(size));
|
|
1559 if (size < SmallForDictionary) {
|
|
1560 returnChunkToFreeList(fc);
|
|
1561 } else {
|
|
1562 returnChunkToDictionary(fc);
|
|
1563 }
|
|
1564 }
|
|
1565
|
|
1566 void
|
|
1567 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
|
|
1568 size_t size, bool coalesced) {
|
|
1569 assert_locked();
|
|
1570 assert(chunk != NULL, "null chunk");
|
|
1571 if (coalesced) {
|
|
1572 // repair BOT
|
|
1573 _bt.single_block(chunk, size);
|
|
1574 }
|
|
1575 addChunkToFreeLists(chunk, size);
|
|
1576 }
|
|
1577
|
|
1578 // We _must_ find the purported chunk on our free lists;
|
|
1579 // we assert if we don't.
|
|
1580 void
|
|
1581 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
|
|
1582 size_t size = fc->size();
|
|
1583 assert_locked();
|
|
1584 debug_only(verifyFreeLists());
|
|
1585 if (size < SmallForDictionary) {
|
|
1586 removeChunkFromIndexedFreeList(fc);
|
|
1587 } else {
|
|
1588 removeChunkFromDictionary(fc);
|
|
1589 }
|
|
1590 _bt.verify_single_block((HeapWord*)fc, size);
|
|
1591 debug_only(verifyFreeLists());
|
|
1592 }
|
|
1593
|
|
1594 void
|
|
1595 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
|
|
1596 size_t size = fc->size();
|
|
1597 assert_locked();
|
|
1598 assert(fc != NULL, "null chunk");
|
|
1599 _bt.verify_single_block((HeapWord*)fc, size);
|
|
1600 _dictionary->removeChunk(fc);
|
|
1601 // adjust _unallocated_block upward, as necessary
|
|
1602 _bt.allocated((HeapWord*)fc, size);
|
|
1603 }
|
|
1604
|
|
1605 void
|
|
1606 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
|
|
1607 assert_locked();
|
|
1608 size_t size = fc->size();
|
|
1609 _bt.verify_single_block((HeapWord*)fc, size);
|
|
1610 NOT_PRODUCT(
|
|
1611 if (FLSVerifyIndexTable) {
|
|
1612 verifyIndexedFreeList(size);
|
|
1613 }
|
|
1614 )
|
|
1615 _indexedFreeList[size].removeChunk(fc);
|
|
1616 debug_only(fc->clearNext());
|
|
1617 debug_only(fc->clearPrev());
|
|
1618 NOT_PRODUCT(
|
|
1619 if (FLSVerifyIndexTable) {
|
|
1620 verifyIndexedFreeList(size);
|
|
1621 }
|
|
1622 )
|
|
1623 }
|
|
1624
|
|
1625 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
|
|
1626 /* A hint is the next larger size that has a surplus.
|
|
1627 Start search at a size large enough to guarantee that
|
|
1628 the excess is >= MIN_CHUNK. */
|
|
1629 size_t start = align_object_size(numWords + MinChunkSize);
|
|
1630 if (start < IndexSetSize) {
|
|
1631 FreeList* it = _indexedFreeList;
|
|
1632 size_t hint = _indexedFreeList[start].hint();
|
|
1633 while (hint < IndexSetSize) {
|
|
1634 assert(hint % MinObjAlignment == 0, "hint should be aligned");
|
|
1635 FreeList *fl = &_indexedFreeList[hint];
|
|
1636 if (fl->surplus() > 0 && fl->head() != NULL) {
|
|
1637 // Found a list with surplus, reset original hint
|
|
1638 // and split out a free chunk which is returned.
|
|
1639 _indexedFreeList[start].set_hint(hint);
|
|
1640 FreeChunk* res = getFromListGreater(fl, numWords);
|
|
1641 assert(res == NULL || res->isFree(),
|
|
1642 "Should be returning a free chunk");
|
|
1643 return res;
|
|
1644 }
|
|
1645 hint = fl->hint(); /* keep looking */
|
|
1646 }
|
|
1647 /* None found. */
|
|
1648 it[start].set_hint(IndexSetSize);
|
|
1649 }
|
|
1650 return NULL;
|
|
1651 }
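// Hint-chain example (hypothetical sizes): for a request of 10 words the
// search starts at align_object_size(10 + MinChunkSize); if that list's
// hint() is, say, 24 and _indexedFreeList[24] has a surplus, a 24-word chunk
// is removed and split so that the 10-word front part is returned and the
// remainder goes back to the free lists; otherwise the hint() links are
// followed until IndexSetSize is reached and NULL is returned.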
|
|
1652
|
|
1653 /* Requires fl->size >= numWords + MinChunkSize */
|
|
1654 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
|
|
1655 size_t numWords) {
|
|
1656 FreeChunk *curr = fl->head();
1657 assert(curr != NULL, "List is empty");
1658 assert(numWords >= MinChunkSize, "Word size is too small");
1659 size_t oldNumWords = curr->size();
1660 assert(oldNumWords >= numWords + MinChunkSize,
1661 "Size of chunks in the list is too small");
|
|
1662
|
|
1663 fl->removeChunk(curr);
|
|
1664 // recorded indirectly by splitChunkAndReturnRemainder -
|
|
1665 // smallSplit(oldNumWords, numWords);
|
|
1666 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
|
|
1667 // Does anything have to be done for the remainder in terms of
|
|
1668 // fixing the card table?
|
|
1669 assert(new_chunk == NULL || new_chunk->isFree(),
|
|
1670 "Should be returning a free chunk");
|
|
1671 return new_chunk;
|
|
1672 }
|
|
1673
|
|
1674 FreeChunk*
|
|
1675 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
|
|
1676 size_t new_size) {
|
|
1677 assert_locked();
|
|
1678 size_t size = chunk->size();
|
|
1679 assert(size > new_size, "Split from a smaller block?");
|
|
1680 assert(is_aligned(chunk), "alignment problem");
|
|
1681 assert(size == adjustObjectSize(size), "alignment problem");
|
|
1682 size_t rem_size = size - new_size;
|
|
1683 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
|
|
1684 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
|
|
1685 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
|
|
1686 assert(is_aligned(ffc), "alignment problem");
|
|
1687 ffc->setSize(rem_size);
|
|
1688 ffc->linkNext(NULL);
|
|
1689 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
|
|
1690 // Above must occur before BOT is updated below.
|
|
1691 // adjust block offset table
|
|
1692 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
|
|
1693 if (rem_size < SmallForDictionary) {
|
|
1694 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
|
|
1695 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
|
|
1696 returnChunkToFreeList(ffc);
|
|
1697 split(size, rem_size);
|
|
1698 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
|
|
1699 } else {
|
|
1700 returnChunkToDictionary(ffc);
|
|
1701 split(size, rem_size);
|
|
1702 }
|
|
1703 chunk->setSize(new_size);
|
|
1704 return chunk;
|
|
1705 }
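// Split layout produced above: [ chunk: new_size | ffc: rem_size ]. The
// remainder ffc is fully formatted as a free chunk (size set, prev/next
// cleared) before the BOT is split and before it is published on a free
// list, so a parallel GC thread never observes a half-initialized block.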
|
|
1706
|
|
1707 void
|
|
1708 CompactibleFreeListSpace::sweep_completed() {
|
|
1709 // Now that space is probably plentiful, refill linear
|
|
1710 // allocation blocks as needed.
|
|
1711 refillLinearAllocBlocksIfNeeded();
|
|
1712 }
|
|
1713
|
|
1714 void
|
|
1715 CompactibleFreeListSpace::gc_prologue() {
|
|
1716 assert_locked();
|
|
1717 if (PrintFLSStatistics != 0) {
|
|
1718 gclog_or_tty->print("Before GC:\n");
|
|
1719 reportFreeListStatistics();
|
|
1720 }
|
|
1721 refillLinearAllocBlocksIfNeeded();
|
|
1722 }
|
|
1723
|
|
1724 void
|
|
1725 CompactibleFreeListSpace::gc_epilogue() {
|
|
1726 assert_locked();
|
|
1727 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
|
|
1728 if (_smallLinearAllocBlock._word_size == 0)
|
|
1729 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
|
|
1730 }
|
|
1731 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
|
|
1732 _promoInfo.stopTrackingPromotions();
|
|
1733 repairLinearAllocationBlocks();
|
|
1734 // Print Space's stats
|
|
1735 if (PrintFLSStatistics != 0) {
|
|
1736 gclog_or_tty->print("After GC:\n");
|
|
1737 reportFreeListStatistics();
|
|
1738 }
|
|
1739 }
|
|
1740
|
|
1741 // Iteration support, mostly delegated from a CMS generation
|
|
1742
|
|
1743 void CompactibleFreeListSpace::save_marks() {
|
|
1744 // mark the "end" of the used space at the time of this call;
|
|
1745 // note, however, that promoted objects from this point
|
|
1746 // on are tracked in the _promoInfo below.
|
|
1747 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
|
|
1748 unallocated_block() : end());
|
|
1749 // inform allocator that promotions should be tracked.
|
|
1750 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
|
|
1751 _promoInfo.startTrackingPromotions();
|
|
1752 }
|
|
1753
|
|
1754 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
|
|
1755 assert(_promoInfo.tracking(), "No preceding save_marks?");
|
|
1756 guarantee(SharedHeap::heap()->n_par_threads() == 0,
|
|
1757 "Shouldn't be called (yet) during parallel part of gc.");
|
|
1758 return _promoInfo.noPromotions();
|
|
1759 }
|
|
1760
|
|
1761 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
|
|
1762 \
|
|
1763 void CompactibleFreeListSpace:: \
|
|
1764 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
|
|
1765 assert(SharedHeap::heap()->n_par_threads() == 0, \
|
|
1766 "Shouldn't be called (yet) during parallel part of gc."); \
|
|
1767 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
|
|
1768 /* \
|
|
1769 * This also restores any displaced headers and removes the elements from \
|
|
1770 * the iteration set as they are processed, so that we have a clean slate \
|
|
1771 * at the end of the iteration. Note, thus, that if new objects are \
|
|
1772 * promoted as a result of the iteration they are iterated over as well. \
|
|
1773 */ \
|
|
1774 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
|
|
1775 }
|
|
1776
|
|
1777 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
|
|
1778
|
|
1779 //////////////////////////////////////////////////////////////////////////////
|
|
1780 // We go over the list of promoted objects, removing each from the list,
|
|
1781 // and applying the closure (this may, in turn, add more elements to
|
|
1782 // the tail of the promoted list, and these newly added objects will
|
|
1783 // also be processed) until the list is empty.
|
|
1784 // To aid verification and debugging, in the non-product builds
|
|
1785 // we actually forward _promoHead each time we process a promoted oop.
|
|
1786 // Note that this is not necessary in general (i.e. when we don't need to
|
|
1787 // call PromotionInfo::verify()) because oop_iterate can only add to the
|
|
1788 // end of _promoTail, and never needs to look at _promoHead.
|
|
1789
|
|
1790 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
|
1791 \
|
|
1792 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
|
|
1793 NOT_PRODUCT(verify()); \
|
|
1794 PromotedObject *curObj, *nextObj; \
|
|
1795 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
|
|
1796 if ((nextObj = curObj->next()) == NULL) { \
|
|
1797 /* protect ourselves against additions due to closure application \
|
|
1798 below by resetting the list. */ \
|
|
1799 assert(_promoTail == curObj, "Should have been the tail"); \
|
|
1800 _promoHead = _promoTail = NULL; \
|
|
1801 } \
|
|
1802 if (curObj->hasDisplacedMark()) { \
|
|
1803 /* restore displaced header */ \
|
|
1804 oop(curObj)->set_mark(nextDisplacedHeader()); \
|
|
1805 } else { \
|
|
1806 /* restore prototypical header */ \
|
|
1807 oop(curObj)->init_mark(); \
|
|
1808 } \
|
|
1809 /* The "promoted_mark" should now not be set */ \
|
|
1810 assert(!curObj->hasPromotedMark(), \
|
|
1811 "Should have been cleared by restoring displaced mark-word"); \
|
|
1812 NOT_PRODUCT(_promoHead = nextObj); \
|
|
1813 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
|
|
1814 if (nextObj == NULL) { /* start at head of list reset above */ \
|
|
1815 nextObj = _promoHead; \
|
|
1816 } \
|
|
1817 } \
|
|
1818 assert(noPromotions(), "post-condition violation"); \
|
|
1819 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
|
|
1820 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
|
|
1821 assert(_firstIndex == _nextIndex, "empty buffer"); \
|
|
1822 }
|
|
1823
|
|
1824 // This should have been ALL_SINCE_...() just like the others,
|
|
1825 // but, because the body of the method above is somewhat longer,
|
|
1826 // the MSVC compiler cannot cope; as a workaround, we split the
|
|
1827 // macro into its 3 constituent parts below (see original macro
|
|
1828 // definition in specializedOopClosures.hpp).
|
|
1829 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
|
|
1830 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
|
|
1831
|
|
1832
|
|
1833 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
|
|
1834 // ugghh... how would one do this efficiently for a non-contiguous space?
|
|
1835 guarantee(false, "NYI");
|
|
1836 }
|
|
1837
|
|
1838 bool CompactibleFreeListSpace::linearAllocationWouldFail() {
|
|
1839 return _smallLinearAllocBlock._word_size == 0;
|
|
1840 }
|
|
1841
|
|
1842 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
|
|
1843 // Fix up linear allocation blocks to look like free blocks
|
|
1844 repairLinearAllocBlock(&_smallLinearAllocBlock);
|
|
1845 }
|
|
1846
|
|
1847 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
|
|
1848 assert_locked();
|
|
1849 if (blk->_ptr != NULL) {
|
|
1850 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
|
|
1851 "Minimum block size requirement");
|
|
1852 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
|
|
1853 fc->setSize(blk->_word_size);
|
|
1854 fc->linkPrev(NULL); // mark as free
|
|
1855 fc->dontCoalesce();
|
|
1856 assert(fc->isFree(), "just marked it free");
|
|
1857 assert(fc->cantCoalesce(), "just marked it uncoalescable");
|
|
1858 }
|
|
1859 }
|
|
1860
|
|
1861 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
|
|
1862 assert_locked();
|
|
1863 if (_smallLinearAllocBlock._ptr == NULL) {
|
|
1864 assert(_smallLinearAllocBlock._word_size == 0,
|
|
1865 "Size of linAB should be zero if the ptr is NULL");
|
|
1866 // Reset the linAB refill and allocation size limit.
|
|
1867 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
|
|
1868 }
|
|
1869 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
|
|
1870 }
|
|
1871
|
|
1872 void
|
|
1873 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
|
|
1874 assert_locked();
|
|
1875 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
|
|
1876 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
|
|
1877 "blk invariant");
|
|
1878 if (blk->_ptr == NULL) {
|
|
1879 refillLinearAllocBlock(blk);
|
|
1880 }
|
|
1881 if (PrintMiscellaneous && Verbose) {
|
|
1882 if (blk->_word_size == 0) {
|
|
1883 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
|
|
1884 }
|
|
1885 }
|
|
1886 }
|
|
1887
|
|
1888 void
|
|
1889 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
|
|
1890 assert_locked();
|
|
1891 assert(blk->_word_size == 0 && blk->_ptr == NULL,
|
|
1892 "linear allocation block should be empty");
|
|
1893 FreeChunk* fc;
|
|
1894 if (blk->_refillSize < SmallForDictionary &&
|
|
1895 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
|
|
1896 // A linAB's strategy might be to use small sizes to reduce
|
|
1897 // fragmentation but still get the benefits of allocation from a
|
|
1898 // linAB.
|
|
1899 } else {
|
|
1900 fc = getChunkFromDictionary(blk->_refillSize);
|
|
1901 }
|
|
1902 if (fc != NULL) {
|
|
1903 blk->_ptr = (HeapWord*)fc;
|
|
1904 blk->_word_size = fc->size();
|
|
1905 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
|
|
1906 }
|
|
1907 }
|
|
1908
|
|
1909 // Support for compaction
|
|
1910
|
|
1911 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
|
|
1912 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
|
|
1913 // prepare_for_compaction() uses the space between live objects
1914 // so that a later phase can skip dead space quickly. As a result,
1915 // verification of the free lists does not work after this call.
|
|
1916 }
|
|
1917
|
|
1918 #define obj_size(q) adjustObjectSize(oop(q)->size())
|
|
1919 #define adjust_obj_size(s) adjustObjectSize(s)
|
|
1920
|
|
1921 void CompactibleFreeListSpace::adjust_pointers() {
|
|
1922 // In other versions of adjust_pointers(), a bail out
|
|
1923 // based on the amount of live data in the generation
|
|
1924 // (i.e., if 0, bail out) may be used.
|
|
1925 // Cannot test used() == 0 here because the free lists have already
|
|
1926 // been mangled by the compaction.
|
|
1927
|
|
1928 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
|
|
1929 // See note about verification in prepare_for_compaction().
|
|
1930 }
|
|
1931
|
|
1932 void CompactibleFreeListSpace::compact() {
|
|
1933 SCAN_AND_COMPACT(obj_size);
|
|
1934 }
|
|
1935
|
|
1936 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
|
|
1937 // where fbs is free block sizes
|
|
1938 double CompactibleFreeListSpace::flsFrag() const {
|
|
1939 size_t itabFree = totalSizeInIndexedFreeLists();
|
|
1940 double frag = 0.0;
|
|
1941 size_t i;
|
|
1942
|
|
1943 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
|
1944 double sz = i;
|
|
1945 frag += _indexedFreeList[i].count() * (sz * sz);
|
|
1946 }
|
|
1947
|
|
1948 double totFree = itabFree +
|
|
1949 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
|
|
1950 if (totFree > 0) {
|
|
1951 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
|
|
1952 (totFree * totFree));
|
|
1953 frag = (double)1.0 - frag;
|
|
1954 } else {
|
|
1955 assert(frag == 0.0, "Follows from totFree == 0");
|
|
1956 }
|
|
1957 return frag;
|
|
1958 }
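// Worked example of the metric (hypothetical numbers): with 100 free words
// in a single 100-word block, frag = 1 - 100*100/(100*100) = 0 (no
// fragmentation); with the same 100 words split into ten 10-word blocks,
// frag = 1 - 10*(10*10)/(100*100) = 0.9, i.e. values near 1 indicate many
// small free blocks.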
|
|
1959
|
|
1960 #define CoalSurplusPercent 1.05
|
|
1961 #define SplitSurplusPercent 1.10
|
|
1962
|
|
1963 void CompactibleFreeListSpace::beginSweepFLCensus(
|
|
1964 float inter_sweep_current,
|
|
1965 float inter_sweep_estimate) {
|
|
1966 assert_locked();
|
|
1967 size_t i;
|
|
1968 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
|
1969 FreeList* fl = &_indexedFreeList[i];
|
|
1970 fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
|
|
1971 fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
|
|
1972 fl->set_beforeSweep(fl->count());
|
|
1973 fl->set_bfrSurp(fl->surplus());
|
|
1974 }
|
|
1975 _dictionary->beginSweepDictCensus(CoalSurplusPercent,
|
|
1976 inter_sweep_current,
|
|
1977 inter_sweep_estimate);
|
|
1978 }
|
|
1979
|
|
1980 void CompactibleFreeListSpace::setFLSurplus() {
|
|
1981 assert_locked();
|
|
1982 size_t i;
|
|
1983 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
|
1984 FreeList *fl = &_indexedFreeList[i];
|
|
1985 fl->set_surplus(fl->count() -
|
|
1986 (ssize_t)((double)fl->desired() * SplitSurplusPercent));
|
|
1987 }
|
|
1988 }
|
|
1989
|
|
1990 void CompactibleFreeListSpace::setFLHints() {
|
|
1991 assert_locked();
|
|
1992 size_t i;
|
|
1993 size_t h = IndexSetSize;
|
|
1994 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
|
|
1995 FreeList *fl = &_indexedFreeList[i];
|
|
1996 fl->set_hint(h);
|
|
1997 if (fl->surplus() > 0) {
|
|
1998 h = i;
|
|
1999 }
|
|
2000 }
|
|
2001 }
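// After this pass, each list's hint() names the nearest larger size that
// still had a surplus when the downward scan reached it (or IndexSetSize if
// none did); bestFitSmall() follows these hints rather than probing every
// intermediate size.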
|
|
2002
|
|
2003 void CompactibleFreeListSpace::clearFLCensus() {
|
|
2004 assert_locked();
|
|
2005 int i;
|
|
2006 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
|
2007 FreeList *fl = &_indexedFreeList[i];
|
|
2008 fl->set_prevSweep(fl->count());
|
|
2009 fl->set_coalBirths(0);
|
|
2010 fl->set_coalDeaths(0);
|
|
2011 fl->set_splitBirths(0);
|
|
2012 fl->set_splitDeaths(0);
|
|
2013 }
|
|
2014 }
|
|
2015
|
|
2016 void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
|
|
2017 setFLSurplus();
|
|
2018 setFLHints();
|
|
2019 if (PrintGC && PrintFLSCensus > 0) {
|
|
2020 printFLCensus(sweepCt);
|
|
2021 }
|
|
2022 clearFLCensus();
|
|
2023 assert_locked();
|
|
2024 _dictionary->endSweepDictCensus(SplitSurplusPercent);
|
|
2025 }
|
|
2026
|
|
2027 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
|
|
2028 if (size < SmallForDictionary) {
|
|
2029 FreeList *fl = &_indexedFreeList[size];
|
|
2030 return (fl->coalDesired() < 0) ||
|
|
2031 ((int)fl->count() > fl->coalDesired());
|
|
2032 } else {
|
|
2033 return dictionary()->coalDictOverPopulated(size);
|
|
2034 }
|
|
2035 }
|
|
2036
|
|
2037 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
|
|
2038 assert(size < SmallForDictionary, "Size too large for indexed list");
|
|
2039 FreeList *fl = &_indexedFreeList[size];
|
|
2040 fl->increment_coalBirths();
|
|
2041 fl->increment_surplus();
|
|
2042 }
|
|
2043
|
|
2044 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
|
|
2045 assert(size < SmallForDictionary, "Size too large for indexed list");
|
|
2046 FreeList *fl = &_indexedFreeList[size];
|
|
2047 fl->increment_coalDeaths();
|
|
2048 fl->decrement_surplus();
|
|
2049 }
|
|
2050
|
|
2051 void CompactibleFreeListSpace::coalBirth(size_t size) {
|
|
2052 if (size < SmallForDictionary) {
|
|
2053 smallCoalBirth(size);
|
|
2054 } else {
|
|
2055 dictionary()->dictCensusUpdate(size,
|
|
2056 false /* split */,
|
|
2057 true /* birth */);
|
|
2058 }
|
|
2059 }
|
|
2060
|
|
2061 void CompactibleFreeListSpace::coalDeath(size_t size) {
|
|
2062 if (size < SmallForDictionary) {
|
|
2063 smallCoalDeath(size);
|
|
2064 } else {
|
|
2065 dictionary()->dictCensusUpdate(size,
|
|
2066 false /* split */,
|
|
2067 false /* birth */);
|
|
2068 }
|
|
2069 }
|
|
2070
|
|
2071 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
|
|
2072 assert(size < SmallForDictionary, "Size too large for indexed list");
|
|
2073 FreeList *fl = &_indexedFreeList[size];
|
|
2074 fl->increment_splitBirths();
|
|
2075 fl->increment_surplus();
|
|
2076 }
|
|
2077
|
|
2078 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
|
|
2079 assert(size < SmallForDictionary, "Size too large for indexed list");
|
|
2080 FreeList *fl = &_indexedFreeList[size];
|
|
2081 fl->increment_splitDeaths();
|
|
2082 fl->decrement_surplus();
|
|
2083 }
|
|
2084
|
|
2085 void CompactibleFreeListSpace::splitBirth(size_t size) {
|
|
2086 if (size < SmallForDictionary) {
|
|
2087 smallSplitBirth(size);
|
|
2088 } else {
|
|
2089 dictionary()->dictCensusUpdate(size,
|
|
2090 true /* split */,
|
|
2091 true /* birth */);
|
|
2092 }
|
|
2093 }
|
|
2094
|
|
2095 void CompactibleFreeListSpace::splitDeath(size_t size) {
|
|
2096 if (size < SmallForDictionary) {
|
|
2097 smallSplitDeath(size);
|
|
2098 } else {
|
|
2099 dictionary()->dictCensusUpdate(size,
|
|
2100 true /* split */,
|
|
2101 false /* birth */);
|
|
2102 }
|
|
2103 }
|
|
2104
|
|
2105 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
|
|
2106 size_t to2 = from - to1;
|
|
2107 splitDeath(from);
|
|
2108 splitBirth(to1);
|
|
2109 splitBirth(to2);
|
|
2110 }
|
|
2111
|
|
2112
|
|
2113 void CompactibleFreeListSpace::print() const {
|
|
2114 tty->print(" CompactibleFreeListSpace");
|
|
2115 Space::print();
|
|
2116 }
|
|
2117
|
|
2118 void CompactibleFreeListSpace::prepare_for_verify() {
|
|
2119 assert_locked();
|
|
2120 repairLinearAllocationBlocks();
|
|
2121 // Verify that the SpoolBlocks look like free blocks of
|
|
2122 // appropriate sizes... To be done ...
|
|
2123 }
|
|
2124
|
|
2125 class VerifyAllBlksClosure: public BlkClosure {
|
|
2126 const CompactibleFreeListSpace* _sp;
|
|
2127 const MemRegion _span;
|
|
2128
|
|
2129 public:
|
|
2130 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
|
|
2131 MemRegion span) : _sp(sp), _span(span) { }
|
|
2132
|
|
2133 size_t do_blk(HeapWord* addr) {
|
|
2134 size_t res;
|
|
2135 if (_sp->block_is_obj(addr)) {
|
|
2136 oop p = oop(addr);
|
|
2137 guarantee(p->is_oop(), "Should be an oop");
|
|
2138 res = _sp->adjustObjectSize(p->size());
|
|
2139 if (_sp->obj_is_alive(addr)) {
|
|
2140 p->verify();
|
|
2141 }
|
|
2142 } else {
|
|
2143 FreeChunk* fc = (FreeChunk*)addr;
|
|
2144 res = fc->size();
|
|
2145 if (FLSVerifyLists && !fc->cantCoalesce()) {
|
|
2146 guarantee(_sp->verifyChunkInFreeLists(fc),
|
|
2147 "Chunk should be on a free list");
|
|
2148 }
|
|
2149 }
|
|
2150 guarantee(res != 0, "Livelock: no rank reduction!");
|
|
2151 return res;
|
|
2152 }
|
|
2153 };
|
|
2154
|
|
2155 class VerifyAllOopsClosure: public OopClosure {
|
|
2156 const CMSCollector* _collector;
|
|
2157 const CompactibleFreeListSpace* _sp;
|
|
2158 const MemRegion _span;
|
|
2159 const bool _past_remark;
|
|
2160 const CMSBitMap* _bit_map;
|
|
2161
|
|
2162 public:
|
|
2163 VerifyAllOopsClosure(const CMSCollector* collector,
|
|
2164 const CompactibleFreeListSpace* sp, MemRegion span,
|
|
2165 bool past_remark, CMSBitMap* bit_map) :
|
|
2166 OopClosure(), _collector(collector), _sp(sp), _span(span),
|
|
2167 _past_remark(past_remark), _bit_map(bit_map) { }
|
|
2168
|
|
2169 void do_oop(oop* ptr) {
|
|
2170 oop p = *ptr;
|
|
2171 if (p != NULL) {
|
|
2172 if (_span.contains(p)) { // the interior oop points into CMS heap
|
|
2173 if (!_span.contains(ptr)) { // reference from outside CMS heap
|
|
2174 // Should be a valid object; the first disjunct below allows
|
|
2175 // us to sidestep an assertion in block_is_obj() that insists
|
|
2176 // that p be in _sp. Note that several generations (and spaces)
|
|
2177 // are spanned by _span (CMS heap) above.
|
|
2178 guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
|
|
2179 "Should be an object");
|
|
2180 guarantee(p->is_oop(), "Should be an oop");
|
|
2181 p->verify();
|
|
2182 if (_past_remark) {
|
|
2183 // Remark has been completed, so the object should be marked.
2184 guarantee(_bit_map->isMarked((HeapWord*)p), "Should be marked");
|
|
2185 }
|
|
2186 }
|
|
2187 else { // reference within CMS heap
|
|
2188 if (_past_remark) {
|
|
2189 // Remark has been completed -- so the referent should have
|
|
2190 // been marked, if referring object is.
|
|
2191 if (_bit_map->isMarked(_collector->block_start(ptr))) {
|
|
2192 guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
|
|
2193 }
|
|
2194 }
|
|
2195 }
|
|
2196 } else if (_sp->is_in_reserved(ptr)) {
|
|
2197 // the reference is from FLS, and points out of FLS
|
|
2198 guarantee(p->is_oop(), "Should be an oop");
|
|
2199 p->verify();
|
|
2200 }
|
|
2201 }
|
|
2202 }
|
|
2203 };
|
|
2204
|
|
2205 void CompactibleFreeListSpace::verify(bool ignored) const {
|
|
2206 assert_lock_strong(&_freelistLock);
|
|
2207 verify_objects_initialized();
|
|
2208 MemRegion span = _collector->_span;
|
|
2209 bool past_remark = (_collector->abstract_state() ==
|
|
2210 CMSCollector::Sweeping);
|
|
2211
|
|
2212 ResourceMark rm;
|
|
2213 HandleMark hm;
|
|
2214
|
|
2215 // Check integrity of CFL data structures
|
|
2216 _promoInfo.verify();
|
|
2217 _dictionary->verify();
|
|
2218 if (FLSVerifyIndexTable) {
|
|
2219 verifyIndexedFreeLists();
|
|
2220 }
|
|
2221 // Check integrity of all objects and free blocks in space
|
|
2222 {
|
|
2223 VerifyAllBlksClosure cl(this, span);
|
|
2224 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
|
|
2225 }
|
|
2226 // Check that all references in the heap to FLS
|
|
2227 // are to valid objects in FLS or that references in
|
|
2228 // FLS are to valid objects elsewhere in the heap
|
|
2229 if (FLSVerifyAllHeapReferences)
|
|
2230 {
|
|
2231 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
|
|
2232 _collector->markBitMap());
|
|
2233 CollectedHeap* ch = Universe::heap();
|
|
2234 ch->oop_iterate(&cl); // all oops in generations
|
|
2235 ch->permanent_oop_iterate(&cl); // all oops in perm gen
|
|
2236 }
|
|
2237
|
|
2238 if (VerifyObjectStartArray) {
|
|
2239 // Verify the block offset table
|
|
2240 _bt.verify();
|
|
2241 }
|
|
2242 }
|
|
2243
|
|
2244 #ifndef PRODUCT
|
|
2245 void CompactibleFreeListSpace::verifyFreeLists() const {
|
|
2246 if (FLSVerifyLists) {
|
|
2247 _dictionary->verify();
|
|
2248 verifyIndexedFreeLists();
|
|
2249 } else {
|
|
2250 if (FLSVerifyDictionary) {
|
|
2251 _dictionary->verify();
|
|
2252 }
|
|
2253 if (FLSVerifyIndexTable) {
|
|
2254 verifyIndexedFreeLists();
|
|
2255 }
|
|
2256 }
|
|
2257 }
|
|
2258 #endif
|
|
2259
|
|
2260 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
|
|
2261 size_t i = 0;
|
|
2262 for (; i < MinChunkSize; i++) {
|
|
2263 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
|
|
2264 }
|
|
2265 for (; i < IndexSetSize; i++) {
|
|
2266 verifyIndexedFreeList(i);
|
|
2267 }
|
|
2268 }
|
|
2269
|
|
2270 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
|
|
2271 guarantee(size % 2 == 0, "Odd slots should be empty");
|
|
2272 for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
|
|
2273 fc = fc->next()) {
|
|
2274 guarantee(fc->size() == size, "Size inconsistency");
|
|
2275 guarantee(fc->isFree(), "!free?");
|
|
2276 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
|
|
2277 }
|
|
2278 }
|
|
2279
|
|
2280 #ifndef PRODUCT
|
|
2281 void CompactibleFreeListSpace::checkFreeListConsistency() const {
|
|
2282 assert(_dictionary->minSize() <= IndexSetSize,
|
|
2283 "Some sizes can't be allocated without recourse to"
|
|
2284 " linear allocation buffers");
|
|
2285 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
|
|
2286 "else MIN_TREE_CHUNK_SIZE is wrong");
|
|
2287 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
|
|
2288 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
|
|
2289 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
|
|
2290 "Some for-loops may be incorrectly initialized");
|
|
2291 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
|
|
2292 "For-loops that iterate over IndexSet with stride 2 may be wrong");
|
|
2293 }
|
|
2294 #endif
|
|
2295
|
|
2296 void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
|
|
2297 assert_lock_strong(&_freelistLock);
|
|
2298 ssize_t bfrSurp = 0;
|
|
2299 ssize_t surplus = 0;
|
|
2300 ssize_t desired = 0;
|
|
2301 ssize_t prevSweep = 0;
|
|
2302 ssize_t beforeSweep = 0;
|
|
2303 ssize_t count = 0;
|
|
2304 ssize_t coalBirths = 0;
|
|
2305 ssize_t coalDeaths = 0;
|
|
2306 ssize_t splitBirths = 0;
|
|
2307 ssize_t splitDeaths = 0;
|
|
2308 gclog_or_tty->print("end sweep# %d\n", sweepCt);
|
|
2309 gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
|
|
2310 "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
|
|
2311 "%7s\t" "\n",
|
|
2312 "size", "bfrsurp", "surplus", "desired", "prvSwep",
|
|
2313 "bfrSwep", "count", "cBirths", "cDeaths", "sBirths",
|
|
2314 "sDeaths");
|
|
2315
|
|
2316 size_t totalFree = 0;
|
|
2317 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
|
2318 const FreeList *fl = &_indexedFreeList[i];
|
|
2319 totalFree += fl->count() * fl->size();
|
|
2320
|
|
2321 gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t"
|
|
2322 "%7d\t" "%7d\t" "%7d\t" "%7d\t"
|
|
2323 "%7d\t" "%7d\t" "%7d\t" "\n",
|
|
2324 fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(),
|
|
2325 fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(),
|
|
2326 fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
|
|
2327 bfrSurp += fl->bfrSurp();
|
|
2328 surplus += fl->surplus();
|
|
2329 desired += fl->desired();
|
|
2330 prevSweep += fl->prevSweep();
|
|
2331 beforeSweep += fl->beforeSweep();
|
|
2332 count += fl->count();
|
|
2333 coalBirths += fl->coalBirths();
|
|
2334 coalDeaths += fl->coalDeaths();
|
|
2335 splitBirths += fl->splitBirths();
|
|
2336 splitDeaths += fl->splitDeaths();
|
|
2337 }
|
|
2338 gclog_or_tty->print("%4s\t"
|
|
2339 "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
|
|
2340 "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
|
|
2341 "totl",
|
|
2342 bfrSurp, surplus, desired, prevSweep, beforeSweep,
|
|
2343 count, coalBirths, coalDeaths, splitBirths, splitDeaths);
|
|
2344 gclog_or_tty->print_cr("Total free in indexed lists " SIZE_FORMAT " words", totalFree);
|
|
2345 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
|
|
2346 (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
|
|
2347 (prevSweep != 0 ? (double)prevSweep : 1.0),
|
|
2348 (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
|
|
2349 _dictionary->printDictCensus();
|
|
2350 }
|
|
2351
|
|
2352 // Return the next displaced header, incrementing the pointer and
|
|
2353 // recycling spool area as necessary.
|
|
2354 markOop PromotionInfo::nextDisplacedHeader() {
|
|
2355 assert(_spoolHead != NULL, "promotionInfo inconsistency");
|
|
2356 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
|
|
2357 "Empty spool space: no displaced header can be fetched");
|
|
2358 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
|
|
2359 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
|
|
2360 // Spool forward
|
|
2361 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
|
|
2362 // forward to next block, recycling this block into spare spool buffer
|
|
2363 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
|
|
2364 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
|
|
2365 _spoolHead->nextSpoolBlock = _spareSpool;
|
|
2366 _spareSpool = _spoolHead;
|
|
2367 _spoolHead = tmp;
|
|
2368 _firstIndex = 1;
|
|
2369 NOT_PRODUCT(
|
|
2370 if (_spoolHead == NULL) { // all buffers fully consumed
|
|
2371 assert(_spoolTail == NULL && _nextIndex == 1,
|
|
2372 "spool buffers processing inconsistency");
|
|
2373 }
|
|
2374 )
|
|
2375 }
|
|
2376 return hdr;
|
|
2377 }
|
|
2378
|
|
2379 void PromotionInfo::track(PromotedObject* trackOop) {
|
|
2380 track(trackOop, oop(trackOop)->klass());
|
|
2381 }
|
|
2382
|
|
2383 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
|
|
2384 // make a copy of header as it may need to be spooled
|
|
2385 markOop mark = oop(trackOop)->mark();
|
|
2386 trackOop->clearNext();
|
|
2387 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
|
|
2388 // save non-prototypical header, and mark oop
|
|
2389 saveDisplacedHeader(mark);
|
|
2390 trackOop->setDisplacedMark();
|
|
2391 } else {
|
|
2392 // we'd like to assert something like the following:
|
|
2393 // assert(mark == markOopDesc::prototype(), "consistency check");
|
|
2394 // ... but the above won't work because the age bits have not (yet) been
|
|
2395 // cleared. The remainder of the check would be identical to the
|
|
2396 // condition checked in must_be_preserved() above, so we don't really
|
|
2397 // have anything useful to check here!
|
|
2398 }
|
|
2399 if (_promoTail != NULL) {
|
|
2400 assert(_promoHead != NULL, "List consistency");
|
|
2401 _promoTail->setNext(trackOop);
|
|
2402 _promoTail = trackOop;
|
|
2403 } else {
|
|
2404 assert(_promoHead == NULL, "List consistency");
|
|
2405 _promoHead = _promoTail = trackOop;
|
|
2406 }
|
|
2407 // Mark as newly promoted, so we can skip over such objects
|
|
2408 // when scanning dirty cards
|
|
2409 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
|
|
2410 trackOop->setPromotedMark();
|
|
2411 }
|
|
2412
|
|
2413 // Save the given displaced header, incrementing the pointer and
|
|
2414 // obtaining more spool area as necessary.
|
|
2415 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
|
|
2416 assert(_spoolHead != NULL && _spoolTail != NULL,
|
|
2417 "promotionInfo inconsistency");
|
|
2418 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
|
|
2419 _spoolTail->displacedHdr[_nextIndex] = hdr;
|
|
2420 // Spool forward
|
|
2421 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
|
|
2422 // get a new spooling block
|
|
2423 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
|
|
2424 _splice_point = _spoolTail; // save for splicing
|
|
2425 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
|
|
2426 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
|
|
2427 // ... but will attempt filling before next promotion attempt
|
|
2428 _nextIndex = 1;
|
|
2429 }
|
|
2430 }
|
|
2431
|
|
2432 // Ensure that spooling space exists. Return false if spooling space
|
|
2433 // could not be obtained.
|
|
2434 bool PromotionInfo::ensure_spooling_space_work() {
|
|
2435 assert(!has_spooling_space(), "Only call when there is no spooling space");
|
|
2436 // Try and obtain more spooling space
|
|
2437 SpoolBlock* newSpool = getSpoolBlock();
|
|
2438 assert(newSpool == NULL ||
|
|
2439 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
|
|
2440 "getSpoolBlock() sanity check");
|
|
2441 if (newSpool == NULL) {
|
|
2442 return false;
|
|
2443 }
|
|
2444 _nextIndex = 1;
|
|
2445 if (_spoolTail == NULL) {
|
|
2446 _spoolTail = newSpool;
|
|
2447 if (_spoolHead == NULL) {
|
|
2448 _spoolHead = newSpool;
|
|
2449 _firstIndex = 1;
|
|
2450 } else {
|
|
2451 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
|
|
2452 "Splice point invariant");
|
|
2453 // Extra check that _splice_point is connected to list
|
|
2454 #ifdef ASSERT
|
|
2455 {
|
|
2456 SpoolBlock* blk = _spoolHead;
|
|
2457 for (; blk->nextSpoolBlock != NULL;
|
|
2458 blk = blk->nextSpoolBlock);
|
|
2459 assert(blk != NULL && blk == _splice_point,
|
|
2460 "Splice point incorrect");
|
|
2461 }
|
|
2462 #endif // ASSERT
|
|
2463 _splice_point->nextSpoolBlock = newSpool;
|
|
2464 }
|
|
2465 } else {
|
|
2466 assert(_spoolHead != NULL, "spool list consistency");
|
|
2467 _spoolTail->nextSpoolBlock = newSpool;
|
|
2468 _spoolTail = newSpool;
|
|
2469 }
|
|
2470 return true;
|
|
2471 }
|
|
2472
|
|
2473 // Get a free spool buffer from the free pool, getting a new block
|
|
2474 // from the heap if necessary.
|
|
2475 SpoolBlock* PromotionInfo::getSpoolBlock() {
|
|
2476 SpoolBlock* res;
|
|
2477 if ((res = _spareSpool) != NULL) {
|
|
2478 _spareSpool = _spareSpool->nextSpoolBlock;
|
|
2479 res->nextSpoolBlock = NULL;
|
|
2480 } else { // spare spool exhausted, get some from heap
|
|
2481 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
|
|
2482 if (res != NULL) {
|
|
2483 res->init();
|
|
2484 }
|
|
2485 }
|
|
2486 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
|
|
2487 return res;
|
|
2488 }
|
|
2489
|
|
2490 void PromotionInfo::startTrackingPromotions() {
|
|
2491 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
|
|
2492 "spooling inconsistency?");
|
|
2493 _firstIndex = _nextIndex = 1;
|
|
2494 _tracking = true;
|
|
2495 }
|
|
2496
|
|
2497 void PromotionInfo::stopTrackingPromotions() {
|
|
2498 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
|
|
2499 "spooling inconsistency?");
|
|
2500 _firstIndex = _nextIndex = 1;
|
|
2501 _tracking = false;
|
|
2502 }
|
|
2503
|
|
2504 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
|
|
2505 // points to the next slot available for filling.
|
|
2506 // The set of slots holding displaced headers are then all those in the
|
|
2507 // right-open interval denoted by:
|
|
2508 //
|
|
2509 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
|
|
2510 //
|
|
2511 // When _spoolTail is NULL, then the set of slots with displaced headers
|
|
2512 // is all those starting at the slot <_spoolHead, _firstIndex> and
|
|
2513 // going up to the last slot of last block in the linked list.
|
|
2514 // In this latter case, _splice_point points to the tail block of
2515 // this linked list of blocks holding displaced headers.
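// Worked example of the slot bookkeeping verified below (hypothetical
// numbers): with two full spool blocks of bufferSize 5 ahead of _spoolTail,
// _firstIndex == 3 and _nextIndex == 2, the count is 2*(5-1) slots from the
// full blocks, minus the (3-1) slots already consumed at the head, plus the
// (2-1) slots filled so far in the tail: 8 - 2 + 1 = 7 displaced headers.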
|
|
2516 void PromotionInfo::verify() const {
|
|
2517 // Verify the following:
|
|
2518 // 1. the number of displaced headers matches the number of promoted
|
|
2519 // objects that have displaced headers
|
|
2520 // 2. each promoted object lies in this space
|
|
2521 debug_only(
|
|
2522 PromotedObject* junk = NULL;
|
|
2523 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
|
|
2524 "Offset of PromotedObject::_next is expected to align with "
|
|
2525 " the OopDesc::_mark within OopDesc");
|
|
2526 )
|
|
2527 // FIXME: guarantee????
|
|
2528 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
|
|
2529 _splice_point != NULL, "list consistency");
|
|
2530 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
|
|
2531 // count the number of objects with displaced headers
|
|
2532 size_t numObjsWithDisplacedHdrs = 0;
|
|
2533 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
|
|
2534 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
|
|
2535 // the last promoted object may fail the mark() != NULL test of is_oop().
|
|
2536 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
|
|
2537 if (curObj->hasDisplacedMark()) {
|
|
2538 numObjsWithDisplacedHdrs++;
|
|
2539 }
|
|
2540 }
|
|
2541 // Count the number of displaced headers
|
|
2542 size_t numDisplacedHdrs = 0;
|
|
2543 for (SpoolBlock* curSpool = _spoolHead;
|
|
2544 curSpool != _spoolTail && curSpool != NULL;
|
|
2545 curSpool = curSpool->nextSpoolBlock) {
|
|
2546 // the first entry is just a self-pointer; indices 1 through
|
|
2547 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
|
|
2548 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
|
|
2549 "first entry of displacedHdr should be self-referential");
|
|
2550 numDisplacedHdrs += curSpool->bufferSize - 1;
|
|
2551 }
|
|
2552 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
|
|
2553 "internal consistency");
|
|
2554 guarantee(_spoolTail != NULL || _nextIndex == 1,
|
|
2555 "Inconsistency between _spoolTail and _nextIndex");
|
|
2556 // We overcounted (_firstIndex-1) worth of slots in block
|
|
2557 // _spoolHead and we undercounted (_nextIndex-1) worth of
|
|
2558 // slots in block _spoolTail. We make an appropriate
|
|
2559 // adjustment by subtracting the first and adding the
|
|
2560 // second: - (_firstIndex - 1) + (_nextIndex - 1)
|
|
2561 numDisplacedHdrs += (_nextIndex - _firstIndex);
|
|
2562 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
|
|
2563 }
|
|
2564
|
|
2565
|
|
2566 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
|
|
2567 _cfls(cfls)
|
|
2568 {
|
|
2569 _blocks_to_claim = CMSParPromoteBlocksToClaim;
|
|
2570 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
|
|
2571 i < CompactibleFreeListSpace::IndexSetSize;
|
|
2572 i += CompactibleFreeListSpace::IndexSetStride) {
|
|
2573 _indexedFreeList[i].set_size(i);
|
|
2574 }
|
|
2575 }
|
|
2576
|
|
2577 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
|
|
2578 FreeChunk* res;
|
|
2579 word_sz = _cfls->adjustObjectSize(word_sz);
|
|
2580 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
|
|
2581 // This locking manages sync with other large object allocations.
|
|
2582 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
|
|
2583 Mutex::_no_safepoint_check_flag);
|
|
2584 res = _cfls->getChunkFromDictionaryExact(word_sz);
|
|
2585 if (res == NULL) return NULL;
|
|
2586 } else {
|
|
2587 FreeList* fl = &_indexedFreeList[word_sz];
|
|
2588 bool filled = false; //TRAP
|
|
2589 if (fl->count() == 0) {
|
|
2590 bool filled = true; //TRAP
|
|
2591 // Attempt to refill this local free list.
|
|
2592 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
|
|
2593 // If it didn't work, give up.
|
|
2594 if (fl->count() == 0) return NULL;
|
|
2595 }
|
|
2596 res = fl->getChunkAtHead();
|
|
2597 assert(res != NULL, "Why was count non-zero?");
|
|
2598 }
|
|
2599 res->markNotFree();
|
|
2600 assert(!res->isFree(), "shouldn't be marked free");
|
|
2601 assert(oop(res)->klass() == NULL, "should look uninitialized");
|
|
2602 // mangle a just allocated object with a distinct pattern.
|
|
2603 debug_only(res->mangleAllocated(word_sz));
|
|
2604 return (HeapWord*)res;
|
|
2605 }
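// Allocation policy in CFLS_LAB::alloc() above: requests of IndexSetSize
// words or more go straight to the shared dictionary under
// parDictionaryAllocLock, while smaller requests are served from this
// thread's private _indexedFreeList, which is refilled in bulk
// (_blocks_to_claim chunks at a time) via par_get_chunk_of_blocks() so that
// the shared lists are locked once per refill rather than once per object.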
|
|
2606
|
|
2607 void CFLS_LAB::retire() {
|
|
2608 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
|
|
2609 i < CompactibleFreeListSpace::IndexSetSize;
|
|
2610 i += CompactibleFreeListSpace::IndexSetStride) {
|
|
2611 if (_indexedFreeList[i].count() > 0) {
|
|
2612 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
|
|
2613 Mutex::_no_safepoint_check_flag);
|
|
2614 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
|
|
2615 // Reset this list.
|
|
2616 _indexedFreeList[i] = FreeList();
|
|
2617 _indexedFreeList[i].set_size(i);
|
|
2618 }
|
|
2619 }
|
|
2620 }
|
|
2621
|
|
2622 void
|
|
2623 CompactibleFreeListSpace::
|
|
2624 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
|
|
2625 assert(fl->count() == 0, "Precondition.");
|
|
2626 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
|
|
2627 "Precondition");
|
|
2628
|
|
2629 // We'll try all multiples of word_sz in the indexed set (starting with
|
|
2630 // word_sz itself), then try getting a big chunk and splitting it.
|
|
2631 int k = 1;
|
|
2632 size_t cur_sz = k * word_sz;
|
|
2633 bool found = false;
|
|
2634 while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
|
|
2635 FreeList* gfl = &_indexedFreeList[cur_sz];
|
|
2636 FreeList fl_for_cur_sz; // Empty.
|
|
2637 fl_for_cur_sz.set_size(cur_sz);
|
|
2638 {
|
|
2639 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
|
|
2640 Mutex::_no_safepoint_check_flag);
|
|
2641 if (gfl->count() != 0) {
|
|
2642 size_t nn = MAX2(n/k, (size_t)1);
|
|
2643 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
|
|
2644 found = true;
|
|
2645 }
|
|
2646 }
|
|
2647 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
|
|
2648 if (found) {
|
|
2649 if (k == 1) {
|
|
2650 fl->prepend(&fl_for_cur_sz);
|
|
2651 } else {
|
|
2652 // Divide each block on fl_for_cur_sz up k ways.
|
|
2653 FreeChunk* fc;
|
|
2654 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
|
|
2655 // Must do this in reverse order, so that anybody attempting to
|
|
2656 // access the main chunk sees it as a single free block until we
|
|
2657 // change it.
|
|
2658 size_t fc_size = fc->size();
|
|
2659 for (int i = k-1; i >= 0; i--) {
|
|
2660 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
|
|
2661 ffc->setSize(word_sz);
|
|
2662 ffc->linkNext(NULL);
|
|
2663 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
|
|
2664 // Above must occur before BOT is updated below.
|
|
2665 // splitting from the right, fc_size == (k - i + 1) * wordsize
|
|
2666 _bt.mark_block((HeapWord*)ffc, word_sz);
|
|
2667 fc_size -= word_sz;
|
|
2668 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
|
|
2669 _bt.verify_single_block((HeapWord*)fc, fc_size);
|
|
2670 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
|
|
2671 // Push this on "fl".
|
|
2672 fl->returnChunkAtHead(ffc);
|
|
2673 }
|
|
2674 // TRAP
|
|
2675 assert(fl->tail()->next() == NULL, "List invariant.");
|
|
2676 }
|
|
2677 }
|
|
2678 return;
|
|
2679 }
|
|
2680 k++; cur_sz = k * word_sz;
|
|
2681 }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                  _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a contending allocator
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkNext(NULL);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      // Above must occur before BOT is updated below.
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  //
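  // If the remainder was small enough for the indexed lists, hand it to the
  // appropriate small free list here (now that the dictionary lock has been
  // dropped) and record a split birth for that size.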
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }

  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkNext(NULL);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    // Above must occur before BOT is updated below.
    // Splitting from the right: at this point fc_size == (i + 1) * word_sz.
    _bt.mark_block((HeapWord*)ffc, word_sz);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  fc->setSize(word_sz);
  fc->linkNext(NULL);
  fc->linkPrev(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

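  // Record the n new blocks of word_sz as split births (and surplus) in the
  // census for that free list, under the list's parallel lock, so the CMS
  // free-list statistics stay consistent with the splits just performed.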
  {
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(new_births);
    ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
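  // n_tasks is the ceiling of used_region().word_size() / task_size, so the
  // tasks exactly cover the used region. For example (illustrative numbers
  // only), a used region of 1000 words with a task size of 64 words gives
  // (1000 + 63) / 64 == 16 tasks.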
  assert((used_region().start() + (n_tasks - 1)*task_size <
          used_region().end()) &&
         (used_region().start() + n_tasks*task_size >=
          used_region().end()), "n_task calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
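  // If a non-NULL "low" is supplied, restrict marking to the part of the
  // generation's reserved span at or above "low": the prefix below "low"
  // (aligned down to a card boundary) is clipped off, and a "low" beyond the
  // end of the span yields an empty region, i.e. no marking tasks.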
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_task calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}