changeset 17760:5479cb006184

8036860: Pad and cache-align the BiasedMappedArray
Summary: Pad and cache-align BiasedMappedArray instances by default to avoid performance variability problems due to false sharing, as instances of this data structure are typically used in performance-sensitive code.
Reviewed-by: brutisso, stefank
author tschatzl
date Mon, 24 Mar 2014 15:31:00 +0100
parents bc22cbb8b45a
children ac767c227ea2
files src/share/vm/gc_implementation/g1/g1BiasedArray.cpp src/share/vm/gc_implementation/g1/g1BiasedArray.hpp src/share/vm/memory/padded.hpp src/share/vm/memory/padded.inline.hpp
diffstat 4 files changed, 28 insertions(+), 6 deletions(-)
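For context: false sharing arises when logically independent data used by different threads lands on the same cache line, so a write by one thread invalidates that line in every other core's cache even though nothing is actually shared. A minimal standalone sketch of the padding idea (not part of this changeset; the 64-byte line size is an assumption, HotSpot uses DEFAULT_CACHE_LINE_SIZE):

#include <cstddef>

// Hypothetical illustration: two counters written by different threads.
const size_t CACHE_LINE_SIZE = 64;  // assumed typical cache line size

struct UnpaddedCounters {
  volatile long a;   // a and b likely share one cache line: writes to a
  volatile long b;   // keep evicting the cached copy of b, and vice versa
};

struct PaddedCounters {
  volatile long a;
  char pad[CACHE_LINE_SIZE - sizeof(long)];  // push b onto its own line
  volatile long b;
};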
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Mon Mar 24 15:31:00 2014 +0100
@@ -24,6 +24,14 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/padded.inline.hpp"
+
+// Allocate a new array, generic version.
+address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
+  assert(length > 0, "just checking");
+  assert(elem_size > 0, "just checking");
+  return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
+}
 
 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
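Conceptually, the hunk above swaps an ordinary C-heap allocation (the old NEW_C_HEAP_ARRAY call, removed from the header below) for an over-allocated, cache-line-aligned one. A hedged before/after sketch in standalone C++ (names are hypothetical, error handling omitted):

#include <cstdlib>
#include <cstring>
#include <cstdint>

// Before: plain heap allocation; the start address is only malloc-aligned,
// so the array can straddle a cache line shared with unrelated data.
unsigned char* old_style(size_t length, size_t elem_size) {
  return static_cast<unsigned char*>(malloc(length * elem_size));
}

// After: over-allocate by 'alignment' bytes, zero the chunk, and round the
// returned pointer up to an alignment boundary, mirroring what
// PaddedPrimitiveArray::create_unfreeable does below.
unsigned char* new_style(size_t length, size_t elem_size, size_t alignment) {
  size_t bytes = length * elem_size + alignment;
  void* chunk = malloc(bytes);
  memset(chunk, 0, bytes);
  uintptr_t p = reinterpret_cast<uintptr_t>(chunk);
  return reinterpret_cast<unsigned char*>((p + alignment - 1) & ~(alignment - 1));
}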
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 
+#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
-#include "memory/allocation.inline.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
 // for accessing its elements using a biased index.
@@ -48,11 +48,7 @@
     _bias(0), _shift_by(0) { }
 
   // Allocate a new array, generic version.
-  static address create_new_base_array(size_t length, size_t elem_size) {
-    assert(length > 0, "just checking");
-    assert(elem_size > 0, "just checking");
-    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
-  }
+  static address create_new_base_array(size_t length, size_t elem_size);
 
   // Initialize the members of this class. The biased start address of this array
   // is the bias (in elements) multiplied by the element size.
--- a/src/share/vm/memory/padded.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/memory/padded.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -101,4 +101,12 @@
   static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
 };
 
+// Helper class to create an array of T objects. The array as a whole will
+// start at a multiple of alignment and its size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedPrimitiveArray {
+ public:
+  static T* create_unfreeable(size_t length);
+};
+
 #endif // SHARE_VM_MEMORY_PADDED_HPP
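For a sense of how the new helper is meant to be used, here is a hedged caller sketch mirroring the g1BiasedArray.cpp hunk above (the length of 1024 is made up; note there is deliberately no way to free the allocation, hence create_unfreeable):

#include "memory/padded.inline.hpp"

// Allocate a zero-filled byte array whose start address is rounded up to
// DEFAULT_CACHE_LINE_SIZE, accounted under the mtGC memory tag.
u_char* flags = PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(1024);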
--- a/src/share/vm/memory/padded.inline.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/memory/padded.inline.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -76,3 +76,13 @@
 
   return result;
 }
+
+template <class T, MEMFLAGS flags, size_t alignment>
+T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
+
+  memset(chunk, 0, length * sizeof(T) + alignment);
+
+  return (T*)align_pointer_up(chunk, alignment);
+}
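The over-allocation of 'alignment' extra bytes is what makes the final align_pointer_up safe: rounding up advances the pointer by at most alignment - 1 bytes, so at least length * sizeof(T) usable bytes always remain past the aligned start. A standalone sketch of the same round-up arithmetic (align_up_sketch is a hypothetical stand-in for HotSpot's align_pointer_up; the mask trick requires a power-of-two alignment):

#include <cstdint>
#include <cassert>

// Round p up to the next multiple of alignment. With alignment = 64:
// 0x1000 -> 0x1000, 0x1001 -> 0x1040, 0x103f -> 0x1040.
static inline void* align_up_sketch(void* p, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<void*>((v + alignment - 1) & ~(alignment - 1));
}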