comparison src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp @ 1145:e018e6884bd8

6631166: CMS: better heuristics when combatting fragmentation Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking. Reviewed-by: jmasa
author ysr
date Wed, 23 Dec 2009 09:23:54 -0800
parents d1605aabd0a1
children c18cbe5936b8
comparison
equal deleted inserted replaced
1111:44f61c24ddab 1145:e018e6884bd8
79 set_head(NULL); 79 set_head(NULL);
80 set_tail(NULL); 80 set_tail(NULL);
81 set_hint(hint); 81 set_hint(hint);
82 } 82 }
83 83
// Diff: the new side adds a split_birth flag that is threaded straight
// through to AllocationStats::initialize(); the old side took no arguments.
// NOTE(review): the default value of split_birth (if any) is declared in the
// header, not visible here — confirm call sites against freeList.hpp.
84 void FreeList::init_statistics() { 84 void FreeList::init_statistics(bool split_birth) {
85 _allocation_stats.initialize(); 85 _allocation_stats.initialize(split_birth);
86 } 86 }
87 87
88 FreeChunk* FreeList::getChunkAtHead() { 88 FreeChunk* FreeList::getChunkAtHead() {
89 assert_proper_lock_protection(); 89 assert_proper_lock_protection();
90 assert(head() == NULL || head()->prev() == NULL, "list invariant"); 90 assert(head() == NULL || head()->prev() == NULL, "list invariant");
290 } 290 }
291 return false; 291 return false;
292 } 292 }
293 293
294 #ifndef PRODUCT 294 #ifndef PRODUCT
// New function (insertion; no old-side column): sanity-checks the per-size
// allocation statistics against the list's current population. The invariant
// asserted is: prevSweep + splitBirths + 1 >= splitDeaths + count(), i.e. the
// recorded "stock" plus one slack unit must cover recorded deaths plus the
// blocks actually on the list. The slack (+1) is explained in the original
// comment below. Debug-only: compiled under the enclosing #ifndef PRODUCT.
295 void FreeList::verify_stats() const {
296 // The +1 of the LH comparand is to allow some "looseness" in
297 // checking: we usually call this interface when adding a block
298 // and we'll subsequently update the stats; we cannot update the
299 // stats beforehand because in the case of the large-block BT
300 // dictionary for example, this might be the first block and
301 // in that case there would be no place that we could record
302 // the stats (which are kept in the block itself).
303 assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1
304 >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
305 }
306
// Diff summary (old column left, new column right):
//   Old: under #ifdef ASSERT, a guarantee() that the protecting lock is held,
//        gated on _protecting_lock != NULL and n_par_threads() > 0 — the
//        "Should become an assert" comment marks it as provisional.
//   New: the precondition checks (_protecting_lock != NULL, ParallelGCThreads
//        > 0) become asserts, and the check is refined per thread type:
//        - VM / ConcurrentGC thread: lock-held check intentionally elided
//          (only a comment in the branch — no code executes there);
//        - GC task (parallel worker) thread: must hold _protecting_lock;
//        - Java thread: must not be running at a safepoint;
//        - anything else: ShouldNotReachHere().
// This is part of the changeset's "tightened assertions related to locking".
295 void FreeList::assert_proper_lock_protection_work() const { 307 void FreeList::assert_proper_lock_protection_work() const {
296 #ifdef ASSERT 308 assert(_protecting_lock != NULL, "Don't call this directly");
297 if (_protecting_lock != NULL && 309 assert(ParallelGCThreads > 0, "Don't call this directly");
298 SharedHeap::heap()->n_par_threads() > 0) { 310 Thread* thr = Thread::current();
299 // Should become an assert. 311 if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
300 guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED"); 312 // assert that we are holding the freelist lock
301 } 313 } else if (thr->is_GC_task_thread()) {
302 #endif 314 assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
315 } else if (thr->is_Java_thread()) {
316 assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
317 } else {
318 ShouldNotReachHere(); // unaccounted thread type?
319 }
303 } 320 }
304 #endif 321 #endif
305 322
306 // Print the "label line" for free list stats. 323 // Print the "label line" for free list stats.
307 void FreeList::print_labels_on(outputStream* st, const char* c) { 324 void FreeList::print_labels_on(outputStream* st, const char* c) {