diff src/share/vm/memory/metaspace.cpp @ 14518:d8041d695d19

Merged with jdk9/dev/hotspot changeset 3812c088b945
author twisti
date Tue, 11 Mar 2014 18:45:59 -0700
parents aa8fab3bed11
children 4ca6dc0799b6
--- a/src/share/vm/memory/metaspace.cpp	Wed Mar 12 00:00:05 2014 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Tue Mar 11 18:45:59 2014 -0700
@@ -46,8 +46,8 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
-typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
-typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
+typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
+typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
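
The typedef change above is the visible edge of BinaryTreeDictionary dropping its template template parameter: the dictionary now takes the fully applied free-list type, so each use site has to spell out FreeList<Metablock> or FreeList<Metachunk> itself, as the later TreeChunk<...> hunks do too. A minimal sketch of the two shapes, using stand-in types rather than the real HotSpot ones:

    // Stand-ins, not the real HotSpot types.
    class Metablock {};
    template <class T> class FreeList {};

    // Old shape: the dictionary receives the free-list template and applies
    // it to its chunk type internally.
    template <class Chunk_t, template <class> class FreeList_t>
    class OldTreeDictionary {
      FreeList_t<Chunk_t> _lists;
    };

    // New shape: the caller applies the template; the dictionary just takes
    // an ordinary type parameter.
    template <class Chunk_t, class FreeList_t>
    class NewTreeDictionary {
      FreeList_t _lists;
    };

    int main() {
      OldTreeDictionary<Metablock, FreeList> od;              // old use site
      NewTreeDictionary<Metablock, FreeList<Metablock> > nd;  // new use site
      (void)od; (void)nd;  // note the "> >" spelling required before C++11
      return 0;
    }
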
@@ -513,8 +513,6 @@
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
-  bool contains(const void *ptr);
-
   void print_on(outputStream* st) const;
 
   class VirtualSpaceListIterator : public StackObj {
@@ -558,7 +556,7 @@
 
  private:
 
-  // protects allocations and contains.
+  // protects allocations
   Mutex* const _lock;
 
   // Type of metadata allocated.
@@ -595,7 +593,11 @@
  private:
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
-  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
+  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
+    // ensure lock-free iteration sees fully initialized node
+    OrderAccess::storestore();
+    _chunks_in_use[index] = v;
+  }
 
   BlockFreelist* block_freelists() const {
     return (BlockFreelist*) &_block_freelists;
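
The storestore() now issued before each chunk-list store is the writer half of a release-style publication: every store that initializes a Metachunk must be visible before the store that links it, so the lock-free walk added in SpaceManager::contains() further down never sees a half-built node. A self-contained sketch of the pattern; Node, g_head and publish are hypothetical stand-ins, and only OrderAccess::storestore() is HotSpot's real API (runtime/orderAccess.hpp), approximated here with a C++11 release fence so the example compiles on its own:

    #include <atomic>
    #include <cstddef>

    namespace OrderAccess {      // stand-in so the sketch builds outside the
      inline void storestore() { // VM; a release fence over-approximates it
        std::atomic_thread_fence(std::memory_order_release);
      }
    }

    struct Node {
      int   payload;
      Node* next;
    };

    Node* volatile g_head = NULL;

    void publish(Node* n, int value) {
      n->payload = value;          // 1. initialize the node...
      n->next    = g_head;
      OrderAccess::storestore();   // 2. ...fence the initializing stores...
      g_head     = n;              // 3. ...then publish the pointer
    }

    bool lock_free_find(int value) {
      for (Node* cur = g_head; cur != NULL; cur = cur->next) {
        if (cur->payload == value) return true;  // never a half-built node
      }
      return false;
    }

    int main() {
      static Node n;
      publish(&n, 42);
      return lock_free_find(42) ? 0 : 1;
    }

On the reader side HotSpot leans on the volatile load and address-dependency ordering, which its supported platforms preserve; fully portable C++11 code would pair the fence with acquire loads, as in the sketch after SpaceManager::contains() below.
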
@@ -708,6 +710,8 @@
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
+  bool contains(const void *ptr);
+
   void verify();
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
@@ -742,7 +746,7 @@
   assert_lock_strong(SpaceManager::expand_lock());
   _container_count++;
   assert(_container_count == container_count_slow(),
-         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                  " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
@@ -755,7 +759,7 @@
 #ifdef ASSERT
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
-    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+    err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
@@ -786,7 +790,7 @@
     return NULL;
   }
 
-  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     // Dark matter.  Too small for dictionary.
     return NULL;
   }
@@ -806,7 +810,7 @@
   MetaWord* new_block = (MetaWord*)free_block;
   assert(block_size >= word_size, "Incorrect size of block from freelist");
   const size_t unused = block_size - word_size;
-  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     return_block(new_block + word_size, unused);
   }
 
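The min_size() threshold in this and the previous hunk is what defines "dark matter": a freed block can only be returned to the dictionary if it is big enough to hold the TreeChunk node that would track it, so smaller fragments are deliberately abandoned inside their chunk. A toy illustration of the same remainder logic; ToyTreeNode and the word arithmetic are stand-ins, not the real TreeChunk layout:

    #include <stddef.h>

    // Tracking a free block requires embedding a node like this inside the
    // block itself, so blocks below its size cannot be tracked at all.
    struct ToyTreeNode {
      ToyTreeNode* left;
      ToyTreeNode* right;
      size_t       word_size;
    };

    const size_t toy_min_words = sizeof(ToyTreeNode) / sizeof(void*);

    // Mirrors get_block()'s handling above: the caller has already asserted
    // block_words >= request_words; a leftover too small to carry a node is
    // abandoned as dark matter instead of going back on the freelist.
    size_t reusable_remainder(size_t block_words, size_t request_words) {
      size_t unused = block_words - request_words;
      return (unused >= toy_min_words) ? unused : 0;
    }

    int main() {
      // On a 64-bit build toy_min_words is 3, so a 6-word block serving a
      // 5-word request leaves a 1-word tail that is simply lost.
      return reusable_remainder(6, 5) == 0 ? 0 : 1;
    }
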
@@ -1159,8 +1163,6 @@
   } else {
     assert(new_entry->reserved_words() == vs_word_size,
         "Reserved memory size differs from requested memory size");
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
     link_vs(new_entry);
     return true;
   }
@@ -1287,19 +1289,6 @@
   }
 }
 
-bool VirtualSpaceList::contains(const void *ptr) {
-  VirtualSpaceNode* list = virtual_space_list();
-  VirtualSpaceListIterator iter(list);
-  while (iter.repeat()) {
-    VirtualSpaceNode* node = iter.get_next();
-    if (node->reserved()->contains(ptr)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 // MetaspaceGC methods
 
 // VM_CollectForMetadataAllocation is the vm operation used to GC.
@@ -1466,9 +1455,10 @@
 
   // No expansion, now see if we want to shrink
   // We would never want to shrink more than this
+  assert(capacity_until_GC >= minimum_desired_capacity,
+         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
+                 capacity_until_GC, minimum_desired_capacity));
   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
-  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
-    max_shrink_bytes));
 
   // Should shrinking be considered?
   if (MaxMetaspaceFreeRatio < 100) {
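
The assert being replaced here was dead code: max_shrink_bytes is a size_t, so max_shrink_bytes >= 0 holds even after the subtraction wraps around. Checking the operands before subtracting is what actually catches the underflow. A compilable demonstration with made-up values:

    #include <assert.h>
    #include <stddef.h>

    int main() {
      size_t capacity_until_GC        = 100;  // made-up values that underflow
      size_t minimum_desired_capacity = 200;

      size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
      assert(max_shrink_bytes >= 0);  // old check: always true for an unsigned
                                      // type, even though we wrapped to ~SIZE_MAX

      // The new check fires before the wrap can happen:
      // assert(capacity_until_GC >= minimum_desired_capacity, ...);
      return 0;
    }
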
@@ -2250,7 +2240,7 @@
 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
   assert_lock_strong(_lock);
   size_t raw_word_size = get_raw_word_size(word_size);
-  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
+  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
   assert(raw_word_size >= min_size,
          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
@@ -2306,7 +2296,7 @@
 void SpaceManager::retire_current_chunk() {
   if (current_chunk() != NULL) {
     size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
       inc_used_metrics(remaining_words);
     }
@@ -2392,9 +2382,24 @@
   return result;
 }
 
+// This function looks at the chunks in the metaspace without locking.
+// The chunks are added with store ordering and not deleted except for at
+// unloading time.
+bool SpaceManager::contains(const void *ptr) {
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
+  {
+    Metachunk* curr = chunks_in_use(i);
+    while (curr != NULL) {
+      if (curr->contains(ptr)) return true;
+      curr = curr->next();
+    }
+  }
+  return false;
+}
+
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
-  // verfication of chunks does not work since
+  // verification of chunks does not work since
   // being in the dictionary alters a chunk.
   if (block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
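
For readers more used to C++11 atomics than to OrderAccess, the guarantee the new function relies on can be restated portably: linking a chunk is a release store, the walk is a chain of acquire loads, and a chunk appended after the scan starts may simply be missed, which an address-membership query can tolerate. A self-contained approximation; ToyChunk and chunk_list_contains are inventions for illustration, not HotSpot code:

    #include <atomic>
    #include <cstddef>

    struct ToyChunk {
      const char*            base;       // start of the chunk's memory
      size_t                 byte_size;  // extent of the chunk
      std::atomic<ToyChunk*> next;       // link published with release ordering

      ToyChunk(const char* b, size_t s) : base(b), byte_size(s), next(nullptr) {}

      bool contains(const void* p) const {
        const char* c = static_cast<const char*>(p);
        return c >= base && c < base + byte_size;
      }
    };

    // Acquire loads pair with the writer's release stores, so any chunk the
    // walk does see is fully initialized; nothing is unlinked until class
    // unloading, so there is no use-after-free to worry about either.
    bool chunk_list_contains(const std::atomic<ToyChunk*>& head, const void* p) {
      for (ToyChunk* c = head.load(std::memory_order_acquire); c != nullptr;
           c = c->next.load(std::memory_order_acquire)) {
        if (c->contains(p)) return true;
      }
      return false;
    }

    int main() {
      static char backing[2][64];
      static ToyChunk c1(backing[1], sizeof backing[1]);
      static ToyChunk c0(backing[0], sizeof backing[0]);
      static std::atomic<ToyChunk*> head(nullptr);

      head.store(&c1, std::memory_order_release);     // publish the first chunk
      c0.next.store(&c1, std::memory_order_relaxed);  // initialize, then...
      head.store(&c0, std::memory_order_release);     // ...publish the second

      return chunk_list_contains(head, backing[1] + 8) ? 0 : 1;
    }
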
@@ -2863,7 +2868,7 @@
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
     if (higher_address <= (address)klass_encoding_max) {
-      lower_base = 0; // effectively lower base is zero.
+      lower_base = 0; // Effectively lower base is zero.
     }
   }
 
@@ -3274,7 +3279,7 @@
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@@ -3289,7 +3294,7 @@
   } else {
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@@ -3463,17 +3468,12 @@
   }
 }
 
-bool Metaspace::contains(const void * ptr) {
-  if (MetaspaceShared::is_in_shared_space(ptr)) {
-    return true;
+bool Metaspace::contains(const void* ptr) {
+  if (vsm()->contains(ptr)) return true;
+  if (using_class_space()) {
+    return class_vsm()->contains(ptr);
   }
-  // This is checked while unlocked.  As long as the virtualspaces are added
-  // at the end, the pointer will be in one of them.  The virtual spaces
-  // aren't deleted presently.  When they are, some sort of locking might
-  // be needed.  Note, locking this can cause inversion problems with the
-  // caller in MetaspaceObj::is_metadata() function.
-  return space_list()->contains(ptr) ||
-         (using_class_space() && class_space_list()->contains(ptr));
+  return false;
 }
 
 void Metaspace::verify() {
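
Two things change semantically in this last hunk. First, the MetaspaceShared::is_in_shared_space() short-circuit is gone: a pointer into the shared archive is not in any loader's own chunks, so callers that need that answer presumably have to ask MetaspaceShared directly. Second, the old version scanned the global space_list(), so a pointer in any metaspace answered true; the new version asks this Metaspace's own SpaceManagers, turning contains() into a per-metaspace question. A toy model of that narrowing, with stand-in types throughout:

    #include <assert.h>
    #include <stddef.h>

    // Each ToyMetaspace owns its own disjoint memory, like a per-loader
    // Metaspace owns its chunks.
    struct ToySpaceManager {
      const char* base;
      size_t      size;
      bool contains(const void* p) const {
        const char* c = (const char*)p;
        return c >= base && c < base + size;
      }
    };

    struct ToyMetaspace {
      ToySpaceManager vsm;  // non-class space only; class space omitted
      bool contains(const void* p) const { return vsm.contains(p); }
    };

    int main() {
      static char arena_a[64];
      static char arena_b[64];
      ToyMetaspace a = { { arena_a, sizeof arena_a } };
      ToyMetaspace b = { { arena_b, sizeof arena_b } };

      assert( a.contains(arena_a + 8));  // its own allocation: still true
      assert(!b.contains(arena_a + 8));  // another metaspace's pointer: now
                                         // false, where the old global scan
                                         // said true
      return 0;
    }
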