# HG changeset patch
# User tschatzl
# Date 1395671460 -3600
# Node ID 5479cb006184db145890866692a86547708e2c71
# Parent bc22cbb8b45a344b8e5c9ca00ce29357d0131407
8036860: Pad and cache-align the BiasedMappedArray
Summary: Pad and cache-align BiasedMappedArray instances by default to avoid performance variability problems due to false sharing, as instances of this data structure are typically used in performance-sensitive code.
Reviewed-by: brutisso, stefank

diff -r bc22cbb8b45a -r 5479cb006184 src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Mon Mar 24 15:31:00 2014 +0100
@@ -24,6 +24,14 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/padded.inline.hpp"
+
+// Allocate a new array, generic version.
+address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
+  assert(length > 0, "just checking");
+  assert(elem_size > 0, "just checking");
+  return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
+}
 
 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
diff -r bc22cbb8b45a -r 5479cb006184 src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 
+#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
-#include "memory/allocation.inline.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
 // for accessing its elements using a biased index.
@@ -48,11 +48,7 @@
     _bias(0), _shift_by(0) { }
 
   // Allocate a new array, generic version.
-  static address create_new_base_array(size_t length, size_t elem_size) {
-    assert(length > 0, "just checking");
-    assert(elem_size > 0, "just checking");
-    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
-  }
+  static address create_new_base_array(size_t length, size_t elem_size);
 
   // Initialize the members of this class. The biased start address of this array
   // is the bias (in elements) multiplied by the element size.
diff -r bc22cbb8b45a -r 5479cb006184 src/share/vm/memory/padded.hpp
--- a/src/share/vm/memory/padded.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/memory/padded.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -101,4 +101,12 @@
   static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
 };
 
+// Helper class to create an array of T objects. The array as a whole will
+// start at a multiple of alignment and its size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedPrimitiveArray {
+ public:
+  static T* create_unfreeable(size_t length);
+};
+
 #endif // SHARE_VM_MEMORY_PADDED_HPP
diff -r bc22cbb8b45a -r 5479cb006184 src/share/vm/memory/padded.inline.hpp
--- a/src/share/vm/memory/padded.inline.hpp	Mon Mar 24 15:30:56 2014 +0100
+++ b/src/share/vm/memory/padded.inline.hpp	Mon Mar 24 15:31:00 2014 +0100
@@ -76,3 +76,13 @@
 
   return result;
 }
+
+template <class T, MEMFLAGS flags, size_t alignment>
+T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
+
+  memset(chunk, 0, length * sizeof(T) + alignment);
+
+  return (T*)align_pointer_up(chunk, alignment);
+}
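
For readers outside HotSpot, the over-allocate-and-align pattern used by
PaddedPrimitiveArray::create_unfreeable() can be sketched in standalone C++.
In the sketch below, kCacheLineSize, align_up() and create_aligned_array() are
illustrative stand-ins for HotSpot's DEFAULT_CACHE_LINE_SIZE,
align_pointer_up() and AllocateHeap(); they are assumptions for the example,
not code from this changeset.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Stand-in for DEFAULT_CACHE_LINE_SIZE; 64 bytes is common on x86,
    // but the real value is platform-dependent.
    static const size_t kCacheLineSize = 64;

    // Round a pointer up to the next multiple of alignment (a power of
    // two), mirroring what HotSpot's align_pointer_up() does.
    static void* align_up(void* p, size_t alignment) {
      uintptr_t v = (uintptr_t)p;
      return (void*)((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }

    // Same pattern as create_unfreeable(): over-allocate by 'alignment'
    // bytes, zero the whole chunk, and hand out the first aligned address
    // inside it. The raw chunk pointer is discarded, so the array can
    // never be freed, hence "unfreeable".
    template <class T>
    T* create_aligned_array(size_t length, size_t alignment = kCacheLineSize) {
      void* chunk = malloc(length * sizeof(T) + alignment);
      if (chunk == NULL) return NULL;  // HotSpot's AllocateHeap aborts instead
      memset(chunk, 0, length * sizeof(T) + alignment);
      return (T*)align_up(chunk, alignment);
    }

    // Usage: the returned array starts on a cache-line boundary, so writers
    // of neighboring heap allocations cannot falsely share its first line.
    // int* biased = create_aligned_array<int>(1024);

Starting each array on its own cache line is what removes the false sharing
called out in the summary: without the alignment, an unrelated, frequently
written allocation could land on the same cache line as the array's first
elements and invalidate them on every write.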