diff src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 1716:be3f9c242c9d

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate transient state of blocks which are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state; they instead wait for the block to become initialized, so that it is safe to query its size. Audited and ensured the order of initialization of object fields (klass, free bit and size) to respect the block state transition protocol. Also included some new assertion checking code, enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
author ysr
date Mon, 16 Aug 2010 15:58:42 -0700
parents e9ff18c4ace7
children 179464550c7d
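The summary above describes a two-sided protocol. The reader side is visible in block_size() in the diff below: a walker that lands on a block in the transient state (free bit not yet set, klass word not yet published) must not trust any size it reads, and simply re-reads until one of the two "initialized" indicators appears. A minimal sketch of that loop, assuming HotSpot-internal types; is_free_chunk() and free_chunk_size() are hypothetical stand-ins for the FreeChunk queries the real code uses:

    // Sketch only: spin until the block leaves the transient state.
    size_t block_size_sketch(const HeapWord* p) {
      while (true) {
        if (is_free_chunk(p)) {              // hypothetical: free bit published
          return free_chunk_size(p);         // hypothetical: size is now stable
        }
        // Volatile read: each iteration must re-read memory, or the compiler
        // may hoist the load and turn this into an infinite loop.
        klassOop k = ((volatile oopDesc*)p)->klass_or_null();
        if (k != NULL) {                     // klass published: object is parsable
          return ((oop)p)->size();
        }
        // Neither indicator set: the allocator has claimed this block but has
        // not finished initializing it. Retry until it does.
      }
    }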
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Aug 14 00:47:52 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Aug 16 15:58:42 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -402,6 +402,29 @@
   return res;
 }
 
+void LinearAllocBlock::print_on(outputStream* st) const {
+  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
+            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
+            _ptr, _word_size, _refillSize, _allocation_size_limit);
+}
+
+void CompactibleFreeListSpace::print_on(outputStream* st) const {
+  st->print_cr("COMPACTIBLE FREELIST SPACE");
+  st->print_cr(" Space:");
+  Space::print_on(st);
+
+  st->print_cr("promoInfo:");
+  _promoInfo.print_on(st);
+
+  st->print_cr("_smallLinearAllocBlock");
+  _smallLinearAllocBlock.print_on(st);
+
+  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
+
+  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
+               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
+}
+
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 const {
   reportIndexedFreeListStatistics();
@@ -557,13 +580,15 @@
 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   HeapWord* prevEnd = end();
   assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
+  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+        "New end is below unallocated block");
   _end = value;
   if (prevEnd != NULL) {
     // Resize the underlying block offset table.
     _bt.resize(pointer_delta(value, bottom()));
     if (value <= prevEnd) {
-      assert(value >= unallocated_block(), "New end is below unallocated block");
+      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+             "New end is below unallocated block");
     } else {
       // Now, take this new chunk and add it to the free blocks.
       // Note that the BOT has not yet been updated for this block.
@@ -938,7 +963,6 @@
 
 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
@@ -957,7 +981,7 @@
       // must read from what 'p' points to in each loop.
       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
       if (k != NULL) {
-        assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
+        assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
         oop o = (oop)p;
         assert(o->is_parsable(), "Should be parsable");
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
@@ -1231,7 +1255,6 @@
        // satisfy the request.  This is different than
         // evm.
         // Don't record chunk off a LinAB?  smallSplitBirth(size);
-
     } else {
       // Raid the exact free lists larger than size, even if they are not
       // overpopulated.
@@ -1449,6 +1472,7 @@
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
   }
   return res;
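The OrderAccess::storestore() added above (and at the analogous sites that follow) is the writer side of the protocol: a chunk's metadata must be fully initialized, with the free bit set, before the BOT update can route a concurrent walker to it. A compressed sketch of the order the fence enforces, using only calls that appear in this change; ffc, start, full_size and left_size are illustrative names:

    // Sketch only: the publication order required by the protocol.
    ffc->setSize(word_sz);       // (a) size first
    ffc->linkPrev(NULL);         // (b) then the free bit: chunk is now self-parsable
    ffc->linkNext(NULL);
    OrderAccess::storestore();   // (c) make (a) and (b) visible before (d)
    _bt.split_block(start, full_size, left_size);  // (d) only now may the BOT point here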
@@ -1477,6 +1501,7 @@
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
     _bt.allocated(res, size);
   }
@@ -1856,6 +1881,8 @@
   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.
   // adjust block offset table
+  OrderAccess::storestore();
+  assert(chunk->isFree() && ffc->isFree(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_size < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
@@ -1911,8 +1938,7 @@
   // mark the "end" of the used space at the time of this call;
   // note, however, that promoted objects from this point
   // on are tracked in the _promoInfo below.
-  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
-                      unallocated_block() : end());
+  set_saved_mark_word(unallocated_block());
   // inform allocator that promotions should be tracked.
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.startTrackingPromotions();
@@ -2238,8 +2264,7 @@
 }
 
 void CompactibleFreeListSpace::print() const {
-  tty->print(" CompactibleFreeListSpace");
-  Space::print();
+  Space::print_on(tty);
 }
 
 void CompactibleFreeListSpace::prepare_for_verify() {
@@ -2253,18 +2278,28 @@
  private:
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
+  HeapWord*                       _last_addr;
+  size_t                          _last_size;
+  bool                            _last_was_obj;
+  bool                            _last_was_live;
 
  public:
   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-    MemRegion span) :  _sp(sp), _span(span) { }
+    MemRegion span) :  _sp(sp), _span(span),
+                       _last_addr(NULL), _last_size(0),
+                       _last_was_obj(false), _last_was_live(false) { }
 
   virtual size_t do_blk(HeapWord* addr) {
     size_t res;
+    bool   was_obj  = false;
+    bool   was_live = false;
     if (_sp->block_is_obj(addr)) {
+      was_obj = true;
       oop p = oop(addr);
       guarantee(p->is_oop(), "Should be an oop");
       res = _sp->adjustObjectSize(p->size());
       if (_sp->obj_is_alive(addr)) {
+        was_live = true;
         p->verify();
       }
     } else {
@@ -2275,7 +2310,20 @@
                   "Chunk should be on a free list");
       }
     }
-    guarantee(res != 0, "Livelock: no rank reduction!");
+    if (res == 0) {
+      gclog_or_tty->print_cr("Livelock: no rank reduction!");
+      gclog_or_tty->print_cr(
+        " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
+        " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
+        addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
+        _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
+      _sp->print_on(gclog_or_tty);
+      guarantee(false, "Seppuku!");
+    }
+    _last_addr = addr;
+    _last_size = res;
+    _last_was_obj  = was_obj;
+    _last_was_live = was_live;
     return res;
   }
 };
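The closure above now carries a one-block history, so the "no rank reduction" failure can report not just the offending address but also the block that preceded it, which is usually where the damage originated. The same breadcrumb idea reduced to a hypothetical minimum; compute_block_size() and report_failure() are illustrative helpers:

    // Sketch only: remember the previous block's facts for post-mortem output.
    HeapWord* last_addr = NULL;
    size_t    last_size = 0;
    HeapWord* addr      = bottom;                 // hypothetical walk bounds
    while (addr < top) {
      size_t res = compute_block_size(addr);      // hypothetical helper
      if (res == 0) {
        // Report this block and its predecessor, then abort (as the
        // guarantee(false, ...) above does).
        report_failure(addr, last_addr, last_size);  // hypothetical helper
      }
      last_addr = addr;
      last_size = res;
      addr += res;
    }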
@@ -2521,7 +2569,7 @@
 
 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   FreeChunk* res;
-  word_sz = _cfls->adjustObjectSize(word_sz);
+  guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
     // This locking manages sync with other large object allocations.
     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
@@ -2667,12 +2715,12 @@
          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
          (CMSSplitIndexedFreeListBlocks || k <= 1);
          k++, cur_sz = k * word_sz) {
-      FreeList* gfl = &_indexedFreeList[cur_sz];
       FreeList fl_for_cur_sz;  // Empty.
       fl_for_cur_sz.set_size(cur_sz);
       {
         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                         Mutex::_no_safepoint_check_flag);
+        FreeList* gfl = &_indexedFreeList[cur_sz];
         if (gfl->count() != 0) {
           // nn is the number of chunks of size cur_sz that
           // we'd need to split k-ways each, in order to create
@@ -2685,9 +2733,9 @@
             // we increment the split death count by the number of blocks
             // we just took from the cur_sz-size blocks list and which
             // we will be splitting below.
-            ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+            ssize_t deaths = gfl->splitDeaths() +
                              fl_for_cur_sz.count();
-            _indexedFreeList[cur_sz].set_splitDeaths(deaths);
+            gfl->set_splitDeaths(deaths);
           }
         }
       }
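Hoisting gfl into the locked region above is a scoping fix: the per-size-class list must only be touched, even just to read its count, while its lock is held, so the pointer is now acquired and used entirely inside the MutexLockerEx scope. In reduced form:

    // Sketch only: acquire and use the list strictly under its lock.
    {
      MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                      Mutex::_no_safepoint_check_flag);
      FreeList* gfl = &_indexedFreeList[cur_sz];
      if (gfl->count() != 0) {
        // ... all reads and writes of *gfl happen here ...
      }
    }   // lock released; 'gfl' must not escape this scope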
@@ -2703,18 +2751,25 @@
             // access the main chunk sees it as a single free block until we
             // change it.
             size_t fc_size = fc->size();
+            assert(fc->isFree(), "Error");
             for (int i = k-1; i >= 0; i--) {
               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+              assert((i != 0) ||
+                        ((fc == ffc) && ffc->isFree() &&
+                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
+                        "Counting error");
               ffc->setSize(word_sz);
+              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
               ffc->linkNext(NULL);
-              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
               // Above must occur before BOT is updated below.
-              // splitting from the right, fc_size == (k - i + 1) * wordsize
-              _bt.mark_block((HeapWord*)ffc, word_sz);
+              OrderAccess::storestore();
+              // splitting from the right, fc_size == i * word_sz
+              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
               fc_size -= word_sz;
-              _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+              assert(fc_size == i*word_sz, "Error");
+              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
               _bt.verify_single_block((HeapWord*)fc, fc_size);
-              _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+              _bt.verify_single_block((HeapWord*)ffc, word_sz);
               // Push this on "fl".
               fl->returnChunkAtHead(ffc);
             }
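Both splitting loops carve chunks from the right (highest addresses first). This keeps a crucial invariant for concurrent readers: at every step the not-yet-split prefix starting at fc is still one well-formed free block, so a walker that lands anywhere in the prefix computes a consistent size. A simplified, hypothetical rendering of the shape shared by the loop above (which folds the i == 0 case into the loop body) and the one at the end of this change:

    // Sketch only: carve k blocks of word_sz from a free block at fc,
    // right to left, so the remaining prefix is always self-consistent.
    size_t fc_size = k * word_sz;
    for (int i = k - 1; i > 0; i--) {
      FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
      ffc->setSize(word_sz);      // initialize the rightmost remaining piece
      ffc->linkPrev(NULL);        // mark free before publication
      OrderAccess::storestore();  // publish before updating the BOT
      _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
      fc_size -= word_sz;         // prefix shrinks: fc still spans fc_size words
    }
    fc->setSize(word_sz);         // last: shrink the head block itself
    fc->linkPrev(NULL);           // idempotent: fc was free all along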
@@ -2744,7 +2799,7 @@
                                   _dictionary->minSize()),
                                   FreeBlockDictionary::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
+        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
         dictionary()->dictCensusUpdate(fc->size(),
                                        true /*split*/,
                                        false /*birth*/);
@@ -2754,8 +2809,10 @@
       }
     }
     if (fc == NULL) return;
+    // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
-    // Otherwise, split up that block.
+    assert(fc->isFree(), "Error: should be a free block");
+    _bt.verify_single_block((HeapWord*)fc, fc->size());
     const size_t nn = fc->size() / word_sz;
     n = MIN2(nn, n);
     assert((ssize_t)n >= 1, "Control point invariant");
@@ -2773,6 +2830,7 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
+      assert(fl->count() == 0, "We never allocated any blocks");
       return;
     }
 
@@ -2785,11 +2843,14 @@
       size_t prefix_size = n * word_sz;
       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
       rem_fc->setSize(rem);
+      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
       rem_fc->linkNext(NULL);
-      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
       // Above must occur before BOT is updated below.
       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
+      OrderAccess::storestore();
       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
+      assert(fc->isFree(), "Error");
+      fc->setSize(prefix_size);
       if (rem >= IndexSetSize) {
         returnChunkToDictionary(rem_fc);
         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
@@ -2815,11 +2876,12 @@
   for (ssize_t i = n-1; i > 0; i--) {
     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
     ffc->setSize(word_sz);
+    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     ffc->linkNext(NULL);
-    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
    // splitting from the right, fc_size == i * word_sz
-    _bt.mark_block((HeapWord*)ffc, word_sz);
+    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
     fc_size -= word_sz;
     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
@@ -2828,9 +2890,11 @@
     fl->returnChunkAtHead(ffc);
   }
   // First chunk
+  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
+  // The blocks above should show their new sizes before the first block below
   fc->setSize(word_sz);
+  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
   fc->linkNext(NULL);
-  fc->linkPrev(NULL);
   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   _bt.verify_single_block((HeapWord*)fc, fc->size());
   fl->returnChunkAtHead(fc);