# HG changeset patch
# User ysr
# Date 1319598941 25200
# Node ID c084129041499406e4e4c24d61816c0e8727d5ec
# Parent  c6a6e936dc688db899b6c1f9b4956bd6f397e524
7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
Summary: Suitably weaken asserts that were in each case a tad too strong; fix up some loose uses of parameters in code related to the size-indexed free list table.
Reviewed-by: jmasa, brutisso, stefank

diff -r c6a6e936dc68 -r c08412904149 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sun Oct 23 23:06:06 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Oct 25 20:15:41 2011 -0700
@@ -62,7 +62,7 @@
   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;

   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = MinObjAlignment;
+  IndexSetStart  = (int) MinChunkSize;
   IndexSetStride = MinObjAlignment;
 }

@@ -138,7 +138,7 @@
   } else {
     _fitStrategy = FreeBlockStrategyNone;
   }
-  checkFreeListConsistency();
+  check_free_list_consistency();

   // Initialize locks for parallel case.

@@ -1358,17 +1358,29 @@
   ShouldNotReachHere();
 }

-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
-  const {
+bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
 }

+bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
+  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
+         (_smallLinearAllocBlock._word_size == fc->size()),
+         "Linear allocation block shows incorrect size");
+  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
+          (_smallLinearAllocBlock._word_size == fc->size()));
+}
+
+// Check whether the purported free chunk is present either as the linear
+// allocation block, in the size-indexed table of (smaller) free blocks,
+// or among the larger free blocks kept in the binary tree dictionary.
 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
-  if (fc->size() >= IndexSetSize) {
+  if (verify_chunk_is_linear_alloc_block(fc)) {
+    return true;
+  } else if (fc->size() < IndexSetSize) {
+    return verifyChunkInIndexedFreeLists(fc);
+  } else {
     return dictionary()->verifyChunkInFreeLists(fc);
-  } else {
-    return verifyChunkInIndexedFreeLists(fc);
   }
 }

@@ -2495,7 +2507,8 @@
   FreeChunk* tail = _indexedFreeList[size].tail();
   size_t num = _indexedFreeList[size].count();
   size_t n = 0;
-  guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
+  guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
+            "Slot should have been empty");
   for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
     guarantee(fc->isFree(), "!free?");

@@ -2506,14 +2519,14 @@
 }

 #ifndef PRODUCT
-void CompactibleFreeListSpace::checkFreeListConsistency() const {
+void CompactibleFreeListSpace::check_free_list_consistency() const {
   assert(_dictionary->minSize() <= IndexSetSize,
     "Some sizes can't be allocated without recourse to"
     " linear allocation buffers");
   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
     "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
-         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
+  assert((IndexSetStride == 2 && IndexSetStart == 4) ||               // 32-bit
+         (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
       "Some for-loops may be incorrectly initialized");
   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),

@@ -2688,33 +2701,27 @@
   }
 }

+// If this is changed in the future to allow parallel
+// access, one would need to take the FL locks and,
+// depending on how it is used, stagger access from
+// parallel threads to reduce contention.
 void CFLS_LAB::retire(int tid) {
   // We run this single threaded with the world stopped;
   // so no need for locks and such.
-#define CFLS_LAB_PARALLEL_ACCESS 0
   NOT_PRODUCT(Thread* t = Thread::current();)
   assert(Thread::current()->is_VM_thread(), "Error");
-  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
-         "Will access to uninitialized slot below");
-#if CFLS_LAB_PARALLEL_ACCESS
-  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
-       i > 0;
-       i -= CompactibleFreeListSpace::IndexSetStride) {
-#else // CFLS_LAB_PARALLEL_ACCESS
   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
-#endif // !CFLS_LAB_PARALLEL_ACCESS
     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
            "Can't retire more than what we obtained");
     if (_num_blocks[i] > 0) {
       size_t num_retire = _indexedFreeList[i].count();
       assert(_num_blocks[i] > num_retire, "Should have used at least one");
       {
-#if CFLS_LAB_PARALLEL_ACCESS
-        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
-                        Mutex::_no_safepoint_check_flag);
-#endif // CFLS_LAB_PARALLEL_ACCESS
+        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+        //                 Mutex::_no_safepoint_check_flag);
+        // Update global stats for num_blocks used
         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
         _global_num_workers[i]++;
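The .cpp changes above teach verifyChunkInFreeLists to consult three places in order: the linear allocation block first, then the size-indexed table for small sizes, then the binary tree dictionary for everything else. The pre-patch code checked only the last two, so a healthy chunk that happened to be the linear allocation block was reported as "not on free list". The sketch below models that three-way lookup as a standalone program; it is illustrative only, and Chunk, SpaceModel, the set containers, and the constants are stand-ins invented for the sketch, not the HotSpot types.

    // chunk_lookup_sketch.cpp -- standalone model of the patched lookup order.
    // All names here are invented for illustration; none are HotSpot's.
    #include <cassert>
    #include <cstddef>
    #include <set>

    struct Chunk { std::size_t word_size; };

    struct SpaceModel {
      static const std::size_t kIndexSetSize = 257;  // bound on "small" sizes

      // The linear allocation block: at most one chunk, tracked by ptr + size.
      const Chunk* lab_ptr = nullptr;
      std::size_t  lab_word_size = 0;

      std::set<const Chunk*> indexed;     // stands in for the size-indexed lists
      std::set<const Chunk*> dictionary;  // stands in for the tree dictionary

      // Mirrors the patched verifyChunkInFreeLists; case (1) is the check
      // the pre-patch code was missing.
      bool verify_chunk_in_free_lists(const Chunk* c) const {
        if (c == lab_ptr && c->word_size == lab_word_size) {
          return true;                                 // (1) linear alloc block
        } else if (c->word_size < kIndexSetSize) {
          return indexed.count(c) != 0;                // (2) size-indexed table
        } else {
          return dictionary.count(c) != 0;             // (3) dictionary
        }
      }
    };

    int main() {
      Chunk small_chunk{8}, large_chunk{512};
      SpaceModel s;
      s.lab_ptr = &small_chunk;                 // small_chunk *is* the LAB
      s.lab_word_size = small_chunk.word_size;
      s.dictionary.insert(&large_chunk);
      assert(s.verify_chunk_in_free_lists(&small_chunk));  // found via check (1)
      assert(s.verify_chunk_in_free_lists(&large_chunk));  // found via check (3)
      return 0;
    }

Dropping branch (1) from the sketch makes the first assert fire, mirroring the failure mode that +FLSVerifyLists tripped over. The matching declarations in the header follow.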
diff -r c6a6e936dc68 -r c08412904149 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sun Oct 23 23:06:06 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Oct 25 20:15:41 2011 -0700
@@ -502,10 +502,14 @@
   void verifyFreeLists()             const PRODUCT_RETURN;
   void verifyIndexedFreeLists()      const;
   void verifyIndexedFreeList(size_t size) const;
-  // verify that the given chunk is in the free lists.
+  // Verify that the given chunk is in the free lists:
+  // i.e. either the binary tree dictionary, the indexed free lists
+  // or the linear allocation block.
   bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  // Verify that the given chunk is the linear allocation block
+  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
   // Do some basic checks on the the free lists.
-  void checkFreeListConsistency()    const PRODUCT_RETURN;
+  void check_free_list_consistency() const PRODUCT_RETURN;

   // Printing support
   void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
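The other half of the patch is the IndexSetStart convention: IndexSetStart now equals MinChunkSize rather than MinObjAlignment, so a slot of the size-indexed table can be non-empty only when its size is at least MinChunkSize and a multiple of IndexSetStride. That is exactly the weakened guarantee in verifyIndexedFreeList, and it is why loops such as CFLS_LAB::retire start at IndexSetStart. A standalone sketch of the invariant follows; the constants are illustrative values chosen to match the 32-bit case in the patched assert (stride 2, start 4), not values computed by HotSpot.

    // index_set_sketch.cpp -- standalone model of the slot-occupancy invariant.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    static const std::size_t kIndexSetSize   = 257; // illustrative table bound
    static const std::size_t kIndexSetStride = 2;   // 32-bit case per the assert
    static const std::size_t kMinChunkSize   = 4;   // == IndexSetStart after patch

    int main() {
      // counts[s] plays the role of _indexedFreeList[s].count().
      std::vector<std::size_t> counts(kIndexSetSize, 0);

      // Populate only the legal slots, as the allocator would.
      for (std::size_t s = kMinChunkSize; s < kIndexSetSize; s += kIndexSetStride) {
        counts[s] = 1;
      }

      // The weakened guarantee: a slot may be non-empty only if its size is
      // >= MinChunkSize and a multiple of the stride. The old form,
      // (size % 2 == 0) || empty, wrongly fired on 64-bit (stride 1), where
      // odd slots are legitimately populated.
      for (std::size_t s = 0; s < kIndexSetSize; ++s) {
        bool legal = (s >= kMinChunkSize) && (s % kIndexSetStride == 0);
        assert(legal || counts[s] == 0);
      }

      // A retire-style walk must begin at kMinChunkSize (IndexSetStart);
      // starting at kIndexSetStride would visit the always-empty slot 2.
      for (std::size_t i = kMinChunkSize; i < kIndexSetSize; i += kIndexSetStride) {
        assert(counts[i] == 1);
      }
      return 0;
    }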