changeset 20760:6e56d7f1634f jdk8u31-b05

8026303: CMS: JVM intermittently crashes with "FreeList of size 258 violates Conservation Principle" assert
Reviewed-by: tschatzl, brutisso
author jmasa
date Thu, 21 Nov 2013 09:57:00 -0800
parents 4b26b980ec8d
children e620c670a9a7
files src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
diffstat 2 files changed, 65 insertions(+), 14 deletions(-)
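The patch splits the old par_get_chunk_of_blocks() into two helpers: par_get_chunk_of_blocks_IFL(), which tries to carve the chunks out of the indexed free lists, and par_get_chunk_of_blocks_dictionary(), which splits a single large block taken from the dictionary. A minimal sketch of the resulting control flow, paraphrasing the new code in the hunks below (member declarations as in compactibleFreeListSpace.hpp):

void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n,
                                                        AdaptiveFreeList<FreeChunk>* fl) {
  // Try to satisfy the request from the indexed free lists first.
  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
    return;
  }
  // Otherwise split a block taken from the dictionary; the block-offset
  // table and dictionary census are updated only once that block is
  // known to be kept.
  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
}
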
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Oct 20 15:14:56 2014 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 21 09:57:00 2013 -0800
@@ -2760,10 +2760,12 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk whose size is a multiple
+// of "word_sz" and, if found, splits it into "word_sz" chunks and adds
+// them to the free list "fl".  "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2844,11 +2846,15 @@
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2859,16 +2865,12 @@
       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2890,10 +2892,14 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
 
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+    dictionary()->dict_census_update(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2926,7 +2932,24 @@
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2974,6 +2997,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
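The essential reordering in the dictionary path is that the block-offset-table update and the split-death census update now happen only after the block pulled from the dictionary is known to be kept, i.e. after the n == 0 bail-out that returns it whole. A simplified sketch of the new get_n_way_chunk_to_split() shown above, with locking and remainder handling elided:

FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
  FreeChunk* fc = NULL;
  // Take a block of at least n * word_sz words from the dictionary,
  // retrying with a smaller n if no such block is available.
  while (n > 0 && fc == NULL) {
    fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                 FreeBlockDictionary<FreeChunk>::atLeast);
    if (fc == NULL) n--;
  }
  if (fc == NULL) return NULL;

  // ... trim n so that n * word_sz fits in fc; if n drops to 0, return
  // the block to the dictionary untouched and give up (return NULL) ...

  // Only now, once the block is definitely being consumed, update the
  // block-offset table and record a split death in the dictionary
  // census.  Under the old ordering these updates ran before the
  // bail-out, so a block that ended up being returned whole had already
  // been counted as split -- the census inconsistency behind the
  // "violates Conservation Principle" assert of 8026303.
  _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);
  dictionary()->dict_census_update(fc->size(), true /*split*/, false /*birth*/);

  // ... split off any remainder and return fc, whose size is asserted
  // to be an exact multiple of word_sz ...
  return fc;
}
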
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Oct 20 15:14:56 2014 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 21 09:57:00 2013 -0800
@@ -172,6 +172,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping