comparison src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 4024:c08412904149

7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
Summary: Suitably weaken asserts that were in each case a tad too strong; fix up some loose uses of parameters in code related to size-indexed free list table.
Reviewed-by: jmasa, brutisso, stefank
author ysr
date Tue, 25 Oct 2011 20:15:41 -0700
parents 1e3493ac2d11
children 5a5ed80bea5b
comparison of 4023:c6a6e936dc68 and 4024:c08412904149

@@ -60,11 +60,11 @@
   assert(MinChunkSize == 0, "already set");
 #define numQuanta(x,y) ((x+y-1)/y)
   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
 
   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = MinObjAlignment;
+  IndexSetStart  = (int) MinChunkSize;
   IndexSetStride = MinObjAlignment;
 }
 
 // Constructor
 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
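
This first hunk is the core of the fix: IndexSetStart is now derived from MinChunkSize rather than from MinObjAlignment, so the first populated slot of the size-indexed free list table is the smallest size a FreeChunk can actually occupy. A minimal, self-contained sketch of the arithmetic, assuming a FreeChunk occupies three pointer-sized words (size, next, prev) and 8-byte object alignment; the real values come from the VM's alignment globals:

    #include <cstdio>
    #include <cstddef>

    // Stand-ins for the HotSpot globals; the three-word FreeChunk
    // layout and 8-byte alignment are assumptions for illustration.
    #define numQuanta(x,y) ((x+y-1)/y)

    int main() {
      const size_t HeapWordSize           = sizeof(void*);     // 4 or 8
      const size_t MinObjAlignmentInBytes = 8;
      const size_t MinObjAlignment        = MinObjAlignmentInBytes / HeapWordSize; // in words
      const size_t sizeofFreeChunk        = 3 * sizeof(void*); // assumed layout

      // Same formula as the patched set_cms_values():
      const size_t MinChunkSize   = numQuanta(sizeofFreeChunk, MinObjAlignmentInBytes)
                                    * MinObjAlignment;
      const size_t IndexSetStart  = MinChunkSize;    // was MinObjAlignment before the fix
      const size_t IndexSetStride = MinObjAlignment;

      // 64-bit host: start=3, stride=1 -> populated slots 3, 4, 5, ...
      // 32-bit host: start=4, stride=2 -> populated slots 4, 6, 8, ...
      printf("start=%zu stride=%zu\n", IndexSetStart, IndexSetStride);
      return 0;
    }

The two outcomes match the 32-bit and 64-bit expectations that check_free_list_consistency() asserts in a later hunk.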

@@ -136,11 +136,11 @@
   if (UseCMSBestFit) {
     _fitStrategy = FreeBlockBestFitFirst;
   } else {
     _fitStrategy = FreeBlockStrategyNone;
   }
-  checkFreeListConsistency();
+  check_free_list_consistency();
 
   // Initialize locks for parallel case.
 
   if (CollectedHeap::use_parallel_gc_threads()) {
     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {

@@ -1356,21 +1356,33 @@
     return ret;
   }
   ShouldNotReachHere();
 }
 
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
-  const {
+bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
 }
 
+bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
+  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
+         (_smallLinearAllocBlock._word_size == fc->size()),
+         "Linear allocation block shows incorrect size");
+  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
+          (_smallLinearAllocBlock._word_size == fc->size()));
+}
+
+// Check if the purported free chunk is present either as a linear
+// allocation block, the size-indexed table of (smaller) free blocks,
+// or the larger free blocks kept in the binary tree dictionary.
 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
-  if (fc->size() >= IndexSetSize) {
+  if (verify_chunk_is_linear_alloc_block(fc)) {
+    return true;
+  } else if (fc->size() < IndexSetSize) {
+    return verifyChunkInIndexedFreeLists(fc);
+  } else {
     return dictionary()->verifyChunkInFreeLists(fc);
-  } else {
-    return verifyChunkInIndexedFreeLists(fc);
   }
 }
 
 #ifndef PRODUCT
 void CompactibleFreeListSpace::assert_locked() const {
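
The new comment spells out the lookup order, and the reordering is the point: a chunk currently backing _smallLinearAllocBlock is on no free list at all, so a verifier that consulted only the indexed table or the dictionary would wrongly report it missing (the "chunk not on free list" failure from the bug title). A toy model of the three-way membership test, using stand-in containers rather than the real FreeChunk/FreeList/BinaryTreeDictionary types; the table-size constant is illustrative:

    #include <cstdio>
    #include <cstddef>
    #include <set>

    // Toy stand-ins for the CFLS data structures, for illustration only.
    struct SpaceModel {
      static const size_t IndexSetSize = 257;     // illustrative value
      const void* linear_alloc_block = nullptr;   // at most one such block
      std::set<const void*> indexed_table;        // chunks with size < IndexSetSize
      std::set<const void*> dictionary;           // chunks with size >= IndexSetSize

      // Mirrors the patched verifyChunkInFreeLists(): linear allocation
      // block first (it is on no list), then the size-indexed table,
      // then the binary tree dictionary.
      bool in_free_lists(const void* c, size_t size) const {
        if (c == linear_alloc_block) return true;
        if (size < IndexSetSize) return indexed_table.count(c) != 0;
        return dictionary.count(c) != 0;
      }
    };

    int main() {
      SpaceModel s;
      int block;
      s.linear_alloc_block = &block;
      // Found even though it is in neither the table nor the dictionary.
      printf("%d\n", s.in_free_lists(&block, 300));  // prints 1
      return 0;
    }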

@@ -2493,29 +2505,30 @@
 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
   FreeChunk* fc = _indexedFreeList[size].head();
   FreeChunk* tail = _indexedFreeList[size].tail();
   size_t num = _indexedFreeList[size].count();
   size_t n = 0;
-  guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
+  guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
+            "Slot should have been empty");
   for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
     guarantee(fc->isFree(), "!free?");
     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
   }
   guarantee(n == num, "Incorrect count");
 }
 
 #ifndef PRODUCT
-void CompactibleFreeListSpace::checkFreeListConsistency() const {
+void CompactibleFreeListSpace::check_free_list_consistency() const {
   assert(_dictionary->minSize() <= IndexSetSize,
          "Some sizes can't be allocated without recourse to"
          " linear allocation buffers");
   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
          "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
-         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
+  assert((IndexSetStride == 2 && IndexSetStart == 4) ||                 // 32-bit
+         (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
          "Some for-loops may be incorrectly initialized");
   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
          "For-loops that iterate over IndexSet with stride 2 may be wrong");
 }
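
This guarantee is the one named in the bug title. The old form hard-coded size % 2 == 0, i.e. "odd slots should be empty", which only holds when IndexSetStride == 2 (32-bit). On a 64-bit VM the stride is 1 and IndexSetStart is 3, so odd slots such as 3 and 5 are legitimately populated and the old guarantee fired. A small self-contained check of the old and new predicates, plugging in the 64-bit values asserted just above:

    #include <cassert>
    #include <cstddef>

    int main() {
      // 64-bit values from check_free_list_consistency():
      const size_t MinChunkSize   = 3;
      const size_t IndexSetStride = 1;

      const size_t size = 3;             // slot for 3-word chunks
      const bool slot_non_empty = true;  // say one chunk is on its list

      // Old guarantee: trips on any occupied odd slot, even a valid one.
      bool old_ok = (size % 2 == 0) || !slot_non_empty;
      // New guarantee: a slot may be occupied iff its size is at least
      // MinChunkSize and a multiple of IndexSetStride.
      bool new_ok = ((size >= MinChunkSize) && (size % IndexSetStride == 0))
                    || !slot_non_empty;

      assert(!old_ok && new_ok);  // old predicate fails, weakened one passes
      return 0;
    }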

@@ -2686,37 +2699,31 @@
       }
     }
   }
 }
 
+// If this is changed in the future to allow parallel
+// access, one would need to take the FL locks and,
+// depending on how it is used, stagger access from
+// parallel threads to reduce contention.
 void CFLS_LAB::retire(int tid) {
   // We run this single threaded with the world stopped;
   // so no need for locks and such.
-#define CFLS_LAB_PARALLEL_ACCESS 0
   NOT_PRODUCT(Thread* t = Thread::current();)
   assert(Thread::current()->is_VM_thread(), "Error");
-  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
-         "Will access to uninitialized slot below");
-#if CFLS_LAB_PARALLEL_ACCESS
-  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
-       i > 0;
-       i -= CompactibleFreeListSpace::IndexSetStride) {
-#else // CFLS_LAB_PARALLEL_ACCESS
   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
-#endif // !CFLS_LAB_PARALLEL_ACCESS
     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
            "Can't retire more than what we obtained");
     if (_num_blocks[i] > 0) {
       size_t num_retire = _indexedFreeList[i].count();
       assert(_num_blocks[i] > num_retire, "Should have used at least one");
       {
-#if CFLS_LAB_PARALLEL_ACCESS
-        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
-                        Mutex::_no_safepoint_check_flag);
-#endif // CFLS_LAB_PARALLEL_ACCESS
+        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+        //                 Mutex::_no_safepoint_check_flag);
+
         // Update globals stats for num_blocks used
         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
         _global_num_workers[i]++;
         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
         if (num_retire > 0) {
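
The deleted CFLS_LAB_PARALLEL_ACCESS scaffolding was dead code (the macro was hard-wired to 0), so retire() keeps only the single-threaded path and preserves the parallel variant as the commented-out MutexLockerEx plus the comment ahead of the function. The assert that IndexSetStart == IndexSetStride is likewise gone: the loop now begins at IndexSetStart, which no longer equals the stride on 32-bit (4 vs 2), and starting there already skips the uninitialized low slots. If retirement were ever made parallel, the comment suggests taking the per-list locks and staggering each worker's starting index so threads do not pile up on the same lists. A hypothetical sketch of such staggering; the names and the use of std::mutex are illustrative, not HotSpot API:

    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Hypothetical illustration of the staggered traversal hinted at in
    // the comment above retire(); none of these names exist in HotSpot.
    void retire_parallel(int tid, size_t num_lists,
                         std::vector<std::mutex>& locks /* one per list */) {
      // Each worker starts at a different list and wraps around, so the
      // workers rarely queue on the same per-list lock at the same time.
      for (size_t k = 0; k < num_lists; ++k) {
        size_t j = (static_cast<size_t>(tid) + k) % num_lists;
        std::lock_guard<std::mutex> guard(locks[j]);  // cf. MutexLockerEx
        // ... fold this worker's per-size counts for list j into the
        //     global statistics, as the single-threaded loop above does ...
      }
    }

    int main() {
      std::vector<std::mutex> locks(8);
      std::thread t0(retire_parallel, 0, locks.size(), std::ref(locks));
      std::thread t1(retire_parallel, 1, locks.size(), std::ref(locks));
      t0.join();
      t1.join();
      return 0;
    }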