diff src/share/vm/memory/metaspace.cpp @ 13102:f9f4503a4ab5
Merge
| author | Christos Kotselidis <christos.kotselidis@oracle.com> |
| --- | --- |
| date | Thu, 21 Nov 2013 15:04:54 +0100 |
| parents | 11b116661830 |
| children | fa76dce60db7 ce86c36b8921 |
--- a/src/share/vm/memory/metaspace.cpp	Thu Nov 21 15:04:26 2013 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Thu Nov 21 15:04:54 2013 +0100
@@ -29,34 +29,34 @@
 #include "memory/collectorPolicy.hpp"
 #include "memory/filemap.hpp"
 #include "memory/freeList.hpp"
-#include "memory/metablock.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
+#include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
-// Define this macro to enable slow integrity checking of
-// the free chunk lists
+
+// Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
 
-// Parameters for stress mode testing
-const uint metadata_deallocate_a_lot_block = 10;
-const uint metadata_deallocate_a_lock_chunk = 3;
 size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
-size_t Metaspace::_class_metaspace_size;
+size_t Metaspace::_compressed_class_space_size;
 
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
@@ -75,8 +75,7 @@
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
-  MediumChunk = 8 * K,
-  HumongousChunkGranularity = 8
+  MediumChunk = 8 * K
 };
 
 static ChunkIndex next_chunk_index(ChunkIndex i) {
@@ -84,35 +83,15 @@
   return (ChunkIndex) (i+1);
 }
 
-// Originally _capacity_until_GC was set to MetaspaceSize here but
-// the default MetaspaceSize before argument processing was being
-// used which was not the desired value.  See the code
-// in should_expand() to see how the initialization is handled
-// now.
-size_t MetaspaceGC::_capacity_until_GC = 0;
-bool MetaspaceGC::_expand_after_GC = false;
+volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
-// Blocks of space for metadata are allocated out of Metachunks.
-//
-// Metachunk are allocated out of MetadataVirtualspaces and once
-// allocated there is no explicit link between a Metachunk and
-// the MetadataVirtualspaces from which it was allocated.
-//
-// Each SpaceManager maintains a
-// list of the chunks it is using and the current chunk.  The current
-// chunk is the chunk from which allocations are done.  Space freed in
-// a chunk is placed on the free list of blocks (BlockFreelist) and
-// reused from there.
-
 typedef class FreeList<Metachunk> ChunkList;
 
 // Manages the global free lists of chunks.
-// Has three lists of free chunks, and a total size and
-// count that includes all three
-
 class ChunkManager : public CHeapObj<mtInternal> {
+  friend class TestVirtualSpaceNodeTest;
 
   // Free list of chunks of different sizes.
   //   SpecializedChunk
@@ -121,7 +100,6 @@
   //   HumongousChunk
   ChunkList _free_chunks[NumberOfFreeLists];
 
-  //   HumongousChunk
   ChunkTreeDictionary _humongous_dictionary;
 
@@ -168,7 +146,6 @@
 
   // add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
-  void chunk_freelist_deallocate(Metachunk* chunk);
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
@@ -202,9 +179,7 @@
   // Returns the list for the given chunk word size.
   ChunkList* find_free_chunks_list(size_t word_size);
 
-  // Add and remove from a list by size.  Selects
-  // list based on size of chunk.
-  void free_chunks_put(Metachunk* chuck);
+  // Remove from a list by size.  Selects list based on size of chunk.
   Metachunk* free_chunks_get(size_t chunk_word_size);
 
   // Debug support
@@ -232,7 +207,6 @@
 // to the allocation of a quantum of metadata).
 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   BlockTreeDictionary* _dictionary;
-  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
 
   // Only allocate and split from freelist if the size of the allocation
   // is at least 1/4th the size of the available block.
@@ -260,6 +234,7 @@
   void print_on(outputStream* st) const;
 };
 
+// A VirtualSpaceList node.
 class VirtualSpaceNode : public CHeapObj<mtClass> {
   friend class VirtualSpaceList;
 
@@ -282,6 +257,8 @@
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
+  // Committed but unused space in the virtual space
+  size_t free_words_in_vs() const;
  public:
 
   VirtualSpaceNode(size_t byte_size);
@@ -293,9 +270,10 @@
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
-  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 
+  bool is_pre_committed() const { return _virtual_space.special(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -325,7 +303,6 @@
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
-  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -337,12 +314,19 @@
 
   // Expands/shrinks the committed space in a virtual space.  Delegates
   // to Virtualspace
-  bool expand_by(size_t words, bool pre_touch = false);
+  bool expand_by(size_t min_words, size_t preferred_words);
 
   // In preparation for deleting this node, remove all the chunks
   // in the node from any freelist.
   void purge(ChunkManager* chunk_manager);
 
+  // If an allocation doesn't fit in the current node a new node is created.
+  // Allocate chunks out of the remaining committed space in this node
+  // to avoid wasting that memory.
+  // This always adds up because all the chunk sizes are multiples of
+  // the smallest chunk size.
+  void retire(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
   void mangle();
@@ -351,42 +335,77 @@
   void print_on(outputStream* st) const;
 };
 
+#define assert_is_ptr_aligned(ptr, alignment) \
+  assert(is_ptr_aligned(ptr, alignment),      \
+    err_msg(PTR_FORMAT " is not aligned to "  \
+      SIZE_FORMAT, ptr, alignment))
+
+#define assert_is_size_aligned(size, alignment) \
+  assert(is_size_aligned(size, alignment),      \
+    err_msg(SIZE_FORMAT " is not aligned to "   \
+      SIZE_FORMAT, size, alignment))
+
+
+// Decide if large pages should be committed when the memory is reserved.
+static bool should_commit_large_pages_when_reserving(size_t bytes) {
+  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
+    size_t words = bytes / BytesPerWord;
+    bool is_class = false; // We never reserve large pages for the class space.
+    if (MetaspaceGC::can_expand(words, is_class) &&
+        MetaspaceGC::allowed_expansion() >= words) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
-  // align up to vm allocation granularity
-  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
+VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   // configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
   if (DumpSharedSpaces) {
-    char* shared_base = (char*)SharedBaseAddress;
-    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
+    bool large_pages = false; // No large pages when dumping the CDS archive.
+    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
     if (_rs.is_reserved()) {
       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
     } else {
       // Get a mmap region anywhere if the SharedBaseAddress fails.
-      _rs = ReservedSpace(byte_size);
+      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
-    _rs = ReservedSpace(byte_size);
+    bool large_pages = should_commit_large_pages_when_reserving(bytes);
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }
 
-  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  if (_rs.is_reserved()) {
+    assert(_rs.base() != NULL, "Catch if we get a NULL address");
+    assert(_rs.size() != 0, "Catch if we get a 0 size");
+    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
+    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
+
+    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  }
 }
 
 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
-    assert(chunk->is_free(), "Should be marked free");
-    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
-    chunk_manager->remove_chunk(chunk);
-    assert(chunk->next() == NULL &&
-           chunk->prev() == NULL,
-           "Was not removed from its list");
-    chunk = (Metachunk*) next;
+    assert(chunk->is_tagged_free(), "Should be tagged free");
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk_manager->remove_chunk(chunk);
+    assert(chunk->next() == NULL &&
+           chunk->prev() == NULL,
+           "Was not removed from its list");
+    chunk = (Metachunk*) next;
   }
 }
 
@@ -400,7 +419,7 @@
     // Don't count the chunks on the free lists.  Those are
     // still part of the VirtualSpaceNode but not currently
     // counted.
-    if (!chunk->is_free()) {
+    if (!chunk->is_tagged_free()) {
       count++;
     }
     chunk = (Metachunk*) next;
@@ -410,8 +429,6 @@
 #endif
 
 // List of VirtualSpaces for metadata allocation.
-// It has a _next link for singly linked list and a MemRegion
-// for total space in the VirtualSpace.
 class VirtualSpaceList : public CHeapObj<mtClass> {
   friend class VirtualSpaceNode;
 
@@ -419,16 +436,13 @@
     VirtualSpaceSize = 256 * K
   };
 
-  // Global list of virtual spaces
   // Head of the list
   VirtualSpaceNode* _virtual_space_list;
   // virtual space currently being used for allocations
   VirtualSpaceNode* _current_virtual_space;
 
-  // Can this virtual list allocate >1 spaces?  Also, used to determine
-  // whether to allocate unlimited small chunks in this virtual space
+  // Is this VirtualSpaceList used for the compressed class space
   bool _is_class;
-  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
 
   // Sum of reserved and committed memory in the virtual spaces
   size_t _reserved_words;
@@ -453,7 +467,11 @@
   // Get another virtual space and add it to the list.  This
   // is typically prompted by a failed attempt to allocate a chunk
   // and is typically followed by the allocation of a chunk.
-  bool grow_vs(size_t vs_word_size);
+  bool create_new_virtual_space(size_t vs_word_size);
+
+  // Chunk up the unused committed space in the current
+  // virtual space and add the chunks to the free list.
+  void retire_current_virtual_space();
 
  public:
   VirtualSpaceList(size_t word_size);
@@ -465,12 +483,12 @@
                  size_t grow_chunks_by_words,
                  size_t medium_chunk_bunch);
 
-  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
-
-  // Get the first chunk for a Metaspace.  Used for
-  // special cases such as the boot class loader, reflection
-  // class loader and anonymous class loader.
-  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
+  bool expand_node_by(VirtualSpaceNode* node,
+                      size_t min_words,
+                      size_t preferred_words);
+
+  bool expand_by(size_t min_words,
+                 size_t preferred_words);
 
   VirtualSpaceNode* current_virtual_space() {
     return _current_virtual_space;
@@ -478,8 +496,7 @@
   }
 
   bool is_class() const { return _is_class; }
 
-  // Allocate the first virtualspace.
-  void initialize(size_t word_size);
+  bool initialization_succeeded() { return _virtual_space_list != NULL; }
 
   size_t reserved_words()  { return _reserved_words; }
   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
@@ -522,44 +539,16 @@
 
 class Metadebug : AllStatic {
   // Debugging support for Metaspaces
-  static int _deallocate_block_a_lot_count;
-  static int _deallocate_chunk_a_lot_count;
   static int _allocation_fail_alot_count;
 
  public:
-  static int deallocate_block_a_lot_count() {
-    return _deallocate_block_a_lot_count;
-  }
-  static void set_deallocate_block_a_lot_count(int v) {
-    _deallocate_block_a_lot_count = v;
-  }
-  static void inc_deallocate_block_a_lot_count() {
-    _deallocate_block_a_lot_count++;
-  }
-  static int deallocate_chunk_a_lot_count() {
-    return _deallocate_chunk_a_lot_count;
-  }
-  static void reset_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count = 1;
-  }
-  static void inc_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count++;
-  }
 
   static void init_allocation_fail_alot_count();
 #ifdef ASSERT
   static bool test_metadata_failure();
 #endif
-
-  static void deallocate_chunk_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-  static void deallocate_block_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-
 };
 
-int Metadebug::_deallocate_block_a_lot_count = 0;
-int Metadebug::_deallocate_chunk_a_lot_count = 0;
 int Metadebug::_allocation_fail_alot_count = 0;
 
 // SpaceManager - used by Metaspace to handle allocations
@@ -647,10 +636,12 @@
   bool is_class() { return _mdtype == Metaspace::ClassType; }
 
   // Accessors
-  size_t specialized_chunk_size() { return SpecializedChunk; }
-  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
-  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
+  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
+  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
+  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
+
+  size_t smallest_chunk_size()    { return specialized_chunk_size(); }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
@@ -708,6 +699,9 @@
   // and allocates from that chunk.
   MetaWord* grow_and_allocate(size_t word_size);
 
+  // Notify memory usage to MemoryService.
+  void track_metaspace_memory_usage();
+
   // debugging support.
 
   void dump(outputStream* const out) const;
@@ -722,14 +716,11 @@
 #endif
 
   size_t get_raw_word_size(size_t word_size) {
-    // If only the dictionary is going to be used (i.e., no
-    // indexed free list), then there is a minimum size requirement.
-    // MinChunkSize is a placeholder for the real minimum size JJJ
     size_t byte_size = word_size * BytesPerWord;
 
-    size_t raw_bytes_size = MAX2(byte_size,
-                                 Metablock::min_block_byte_size());
-    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
+    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
+
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 
@@ -782,17 +773,8 @@
   }
 }
 
-Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
-  Metablock* block = (Metablock*) p;
-  block->set_word_size(word_size);
-  block->set_prev(NULL);
-  block->set_next(NULL);
-
-  return block;
-}
-
 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
-  Metablock* free_chunk = initialize_free_chunk(p, word_size);
+  Metablock* free_chunk = ::new (p) Metablock(word_size);
   if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
   }
@@ -869,6 +851,12 @@
   MetaWord* chunk_limit = top();
   assert(chunk_limit != NULL, "Not safe to call this method");
 
+  // The virtual spaces are always expanded by the
+  // commit granularity to enforce the following condition.
+  // Without this the is_available check will not work correctly.
+  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
+      "The committed memory doesn't match the expanded memory.");
+
   if (!is_available(chunk_word_size)) {
     if (TraceMetadataChunkAllocation) {
       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
@@ -888,14 +876,21 @@
 }
 
 // Expand the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
-  size_t bytes = words * BytesPerWord;
-  bool result =  virtual_space()->expand_by(bytes, pre_touch);
-  if (TraceMetavirtualspaceAllocation && !result) {
-    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
-                           "for byte size " SIZE_FORMAT, bytes);
-    virtual_space()->print_on(gclog_or_tty);
+bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
+  size_t min_bytes = min_words * BytesPerWord;
+  size_t preferred_bytes = preferred_words * BytesPerWord;
+
+  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
+
+  if (uncommitted < min_bytes) {
+    return false;
   }
+
+  size_t commit = MIN2(preferred_bytes, uncommitted);
+  bool result = virtual_space()->expand_by(commit, false);
+
+  assert(result, "Failed to commit memory");
+
   return result;
 }
 
@@ -914,12 +909,23 @@
     return false;
   }
 
-  // An allocation out of this Virtualspace that is larger
-  // than an initial commit size can waste that initial committed
-  // space.
-  size_t committed_byte_size = 0;
-  bool result = virtual_space()->initialize(_rs, committed_byte_size);
+  // These are necessary restriction to make sure that the virtual space always
+  // grows in steps of Metaspace::commit_alignment(). If both base and size are
+  // aligned only the middle alignment of the VirtualSpace is used.
+  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
+  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
+
+  // ReservedSpaces marked as special will have the entire memory
+  // pre-committed. Setting a committed size will make sure that
+  // committed_size and actual_committed_size agrees.
+  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
+
+  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
+                                            Metaspace::commit_alignment());
   if (result) {
+    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
+        "Checking that the pre-committed memory was registered by the VirtualSpace");
+
     set_top((MetaWord*)virtual_space()->low());
     set_reserved(MemRegion((HeapWord*)_rs.base(),
                  (HeapWord*)(_rs.base() + _rs.size())));
@@ -976,13 +982,23 @@
   _reserved_words = _reserved_words - v;
 }
 
+#define assert_committed_below_limit()                        \
+  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
+      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
+              " limit (MaxMetaspaceSize): " SIZE_FORMAT,      \
+          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
+
 void VirtualSpaceList::inc_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
   _committed_words = _committed_words + v;
+
+  assert_committed_below_limit();
 }
 void VirtualSpaceList::dec_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
   _committed_words = _committed_words - v;
+
+  assert_committed_below_limit();
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
@@ -1004,7 +1020,7 @@
   }
 
   // Chunk is being removed from the chunks free list.
-  dec_free_chunks_total(chunk->capacity_word_size());
+  dec_free_chunks_total(chunk->word_size());
 }
 
 // Walk the list of VirtualSpaceNodes and delete
@@ -1025,8 +1041,8 @@
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
       // Unlink it from the list
       if (prev_vsl == vsl) {
-        // This is the case of the current note being the first note.
-        assert(vsl == virtual_space_list(), "Expected to be the first note");
+        // This is the case of the current node being the first node.
+        assert(vsl == virtual_space_list(), "Expected to be the first node");
         set_virtual_space_list(vsl->next());
       } else {
         prev_vsl->set_next(vsl->next());
@@ -1054,7 +1070,36 @@
 #endif
 }
 
-VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
+void VirtualSpaceList::retire_current_virtual_space() {
+  assert_lock_strong(SpaceManager::expand_lock());
+
+  VirtualSpaceNode* vsn = current_virtual_space();
+
+  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
+                                  Metaspace::chunk_manager_metadata();
+
+  vsn->retire(cm);
+}
+
+void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
+  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
+    ChunkIndex index = (ChunkIndex)i;
+    size_t chunk_size = chunk_manager->free_chunks(index)->size();
+
+    while (free_words_in_vs() >= chunk_size) {
+      DEBUG_ONLY(verify_container_count();)
+      Metachunk* chunk = get_chunk_vs(chunk_size);
+      assert(chunk != NULL, "allocation should have been successful");
+
+      chunk_manager->return_chunks(index, chunk);
+      chunk_manager->inc_free_chunks_total(chunk_size);
+      DEBUG_ONLY(verify_container_count();)
+    }
+  }
+  assert(free_words_in_vs() == 0, "should be empty now");
+}
+
+VirtualSpaceList::VirtualSpaceList(size_t word_size) :
   _is_class(false),
   _virtual_space_list(NULL),
   _current_virtual_space(NULL),
@@ -1063,9 +1108,7 @@
   _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  bool initialization_succeeded = grow_vs(word_size);
-  assert(initialization_succeeded,
-    " VirtualSpaceList initialization should not fail");
+  create_new_virtual_space(word_size);
 }
 
 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
@@ -1079,8 +1122,9 @@
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   bool succeeded = class_entry->initialize();
-  assert(succeeded, " VirtualSpaceList initialization should not fail");
-  link_vs(class_entry);
+  if (succeeded) {
+    link_vs(class_entry);
+  }
 }
 
 size_t VirtualSpaceList::free_bytes() {
@@ -1088,14 +1132,24 @@
 }
 
 // Allocate another meta virtual space and add it to the list.
-bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
+bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
-  if (vs_word_size == 0) {
+
+  if (is_class()) {
+    assert(false, "We currently don't support more than one VirtualSpace for"
+                  " the compressed class space. The initialization of the"
+                  " CCS uses another code path and should not hit this path.");
    return false;
  }
+
+  if (vs_word_size == 0) {
+    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
+    return false;
+  }
+
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
+  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
 
   // Allocate the meta virtual space and initialize it.
   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1103,7 +1157,8 @@
     delete new_entry;
     return false;
   } else {
-    assert(new_entry->reserved_words() == vs_word_size, "Must be");
+    assert(new_entry->reserved_words() == vs_word_size,
+        "Reserved memory size differs from requested memory size");
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
     link_vs(new_entry);
@@ -1130,20 +1185,68 @@
   }
 }
 
-bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
+                                      size_t min_words,
+                                      size_t preferred_words) {
   size_t before = node->committed_words();
 
-  bool result = node->expand_by(word_size, pre_touch);
+  bool result = node->expand_by(min_words, preferred_words);
 
   size_t after = node->committed_words();
 
   // after and before can be the same if the memory was pre-committed.
-  assert(after >= before, "Must be");
+  assert(after >= before, "Inconsistency");
   inc_committed_words(after - before);
 
   return result;
 }
 
+bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
+  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
+  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
+  assert(min_words <= preferred_words, "Invalid arguments");
+
+  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+    return false;
+  }
+
+  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
+  if (allowed_expansion_words < min_words) {
+    return false;
+  }
+
+  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
+
+  // Commit more memory from the the current virtual space.
+  bool vs_expanded = expand_node_by(current_virtual_space(),
+                                    min_words,
+                                    max_expansion_words);
+  if (vs_expanded) {
+    return true;
+  }
+  retire_current_virtual_space();
+
+  // Get another virtual space.
+  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
+  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
+
+  if (create_new_virtual_space(grow_vs_words)) {
+    if (current_virtual_space()->is_pre_committed()) {
+      // The memory was pre-committed, so we are done here.
+      assert(min_words <= current_virtual_space()->committed_words(),
+          "The new VirtualSpace was pre-committed, so it"
+          "should be large enough to fit the alloc request.");
+      return true;
+    }
+
+    return expand_node_by(current_virtual_space(),
+                          min_words,
+                          max_expansion_words);
+  }
+
+  return false;
+}
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                            size_t grow_chunks_by_words,
                                            size_t medium_chunk_bunch) {
@@ -1151,63 +1254,27 @@
 
   // Allocate a chunk out of the current virtual space.
   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
 
-  if (next == NULL) {
-    // Not enough room in current virtual space.  Try to commit
-    // more space.
-    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
-                                     grow_chunks_by_words);
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
-                                                      page_size_words);
-    bool vs_expanded =
-      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
-    if (!vs_expanded) {
-      // Should the capacity of the metaspaces be expanded for
-      // this allocation?  If it's the virtual space for classes and is
-      // being used for CompressedHeaders, don't allocate a new virtualspace.
-      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
-        // Get another virtual space.
-        size_t allocation_aligned_expand_words =
-            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
-        size_t grow_vs_words =
-          MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
-        if (grow_vs(grow_vs_words)) {
-          // Got it.  It's on the list now.  Get a chunk from it.
-          assert(current_virtual_space()->expanded_words() == 0,
-              "New virtual space nodes should not have expanded");
-
-          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
-                                                              page_size_words);
-          // We probably want to expand by aligned_expand_vs_by_words here.
-          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
-          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-        }
-      } else {
-        // Allocation will fail and induce a GC
-        if (TraceMetadataChunkAllocation && Verbose) {
-          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
-            " Fail instead of expand the metaspace");
-        }
-      }
-    } else {
-      // The virtual space expanded, get a new chunk
-      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-      assert(next != NULL, "Just expanded, should succeed");
-    }
+  if (next != NULL) {
+    return next;
   }
 
-  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
-         "New chunk is still on some list");
-  return next;
-}
-
-Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
-                                                      size_t chunk_bunch) {
-  // Get a chunk from the chunk freelist
-  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
-                                       chunk_word_size,
-                                       chunk_bunch);
-  return new_chunk;
+  // The expand amount is currently only determined by the requested sizes
+  // and not how much committed memory is left in the current virtual space.
+
+  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
+  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
+  if (min_word_size >= preferred_word_size) {
+    // Can happen when humongous chunks are allocated.
+    preferred_word_size = min_word_size;
+  }
+
+  bool expanded = expand_by(min_word_size, preferred_word_size);
+  if (expanded) {
+    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+    assert(next != NULL, "The allocation was expected to succeed after the expansion");
+  }
+
+  return next;
 }
 
 void VirtualSpaceList::print_on(outputStream* st) const {
@@ -1256,96 +1323,96 @@
 
 // Calculate the amount to increase the high water mark (HWM).
 // Increase by a minimum amount (MinMetaspaceExpansion) so that
 // another expansion is not requested too soon.  If that is not
-// enough to satisfy the allocation (i.e. big enough for a word_size
-// allocation), increase by MaxMetaspaceExpansion.  If that is still
-// not enough, expand by the size of the allocation (word_size) plus
-// some.
-size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
-  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
-  size_t page_size_words = os::vm_page_size() / BytesPerWord;
-  size_t size_delta_words = align_size_up(word_size, page_size_words);
-  size_t delta_words = MAX2(size_delta_words, min_delta_words);
-  if (delta_words > min_delta_words) {
+// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
+// If that is still not enough, expand by the size of the allocation
+// plus some.
+size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
+  size_t min_delta = MinMetaspaceExpansion;
+  size_t max_delta = MaxMetaspaceExpansion;
+  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
+
+  if (delta <= min_delta) {
+    delta = min_delta;
+  } else if (delta <= max_delta) {
     // Don't want to hit the high water mark on the next
     // allocation so make the delta greater than just enough
     // for this allocation.
-    delta_words = MAX2(delta_words, max_delta_words);
-    if (delta_words > max_delta_words) {
-      // This allocation is large but the next ones are probably not
-      // so increase by the minimum.
-      delta_words = delta_words + min_delta_words;
-    }
+    delta = max_delta;
+  } else {
+    // This allocation is large but the next ones are probably not
+    // so increase by the minimum.
+    delta = delta + min_delta;
   }
-  return delta_words;
+
+  assert_is_size_aligned(delta, Metaspace::commit_alignment());
+
+  return delta;
+}
+
+size_t MetaspaceGC::capacity_until_GC() {
+  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+  assert(value >= MetaspaceSize, "Not initialized properly?");
+  return value;
 }
 
-bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
-
-  // If the user wants a limit, impose one.
-  // The reason for someone using this flag is to limit reserved space.  So
-  // for non-class virtual space, compare against virtual spaces that are reserved.
-  // For class virtual space, we only compare against the committed space, not
-  // reserved space, because this is a larger space prereserved for compressed
-  // class pointers.
-  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
-    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
-    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-    size_t real_allocated     = nonclass_allocated + class_allocated;
-    if (real_allocated >= MaxMetaspaceSize) {
+size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+}
+
+size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+}
+
+bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
+  // Check if the compressed class space is full.
+  if (is_class && Metaspace::using_class_space()) {
+    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }
 
-  // Class virtual space should always be expanded.  Call GC for the other
-  // metadata virtual space.
-  if (Metaspace::using_class_space() &&
-      (vsl == Metaspace::class_space_list())) return true;
-
-  // If this is part of an allocation after a GC, expand
-  // unconditionally.
-  if (MetaspaceGC::expand_after_GC()) {
-    return true;
+  // Check if the user has imposed a limit on the metaspace memory.
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+    return false;
   }
-
-  // If the capacity is below the minimum capacity, allow the
-  // expansion.  Also set the high-water-mark (capacity_until_GC)
-  // to that minimum capacity so that a GC will not be induced
-  // until that minimum capacity is exceeded.
-  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
-  size_t metaspace_size_bytes = MetaspaceSize;
-  if (committed_capacity_bytes < metaspace_size_bytes ||
-      capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_bytes);
-    return true;
-  } else {
-    if (committed_capacity_bytes < capacity_until_GC()) {
-      return true;
-    } else {
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
-                               "  capacity_until_GC " SIZE_FORMAT
-                               "  allocated_capacity_bytes " SIZE_FORMAT,
-                               word_size,
-                               capacity_until_GC(),
-                               MetaspaceAux::allocated_capacity_bytes());
-      }
-      return false;
-    }
+  return true;
+}
+
+size_t MetaspaceGC::allowed_expansion() {
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+
+  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
+
+  // Always grant expansion if we are initiating the JVM,
+  // or if the GC_locker is preventing GCs.
+  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
+    return left_until_max / BytesPerWord;
   }
+
+  size_t capacity_until_gc = capacity_until_GC();
+
+  if (capacity_until_gc <= committed_bytes) {
+    return 0;
+  }
+
+  size_t left_until_GC = capacity_until_gc - committed_bytes;
+  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
+
+  return left_to_commit / BytesPerWord;
 }
 
-
 void MetaspaceGC::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  // Until a faster way of calculating the "used" quantity is implemented,
-  // use "capacity".
   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
@@ -1377,9 +1444,10 @@
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
+    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
+      MetaspaceGC::inc_capacity_until_GC(expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = capacity_until_GC;
@@ -1436,6 +1504,9 @@
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+
+      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
+
      assert(shrink_bytes <= max_shrink_bytes,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_bytes, max_shrink_bytes));
@@ -1467,60 +1538,12 @@
   // Don't shrink unless it's significant
   if (shrink_bytes >= MinMetaspaceExpansion &&
       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
-    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
+    MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
   }
 }
 
 // Metadebug methods
 
-void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
-                                       size_t chunk_word_size){
-#ifdef ASSERT
-  VirtualSpaceList* vsl = sm->vs_list();
-  if (MetaDataDeallocateALot &&
-      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::reset_deallocate_chunk_a_lot_count();
-    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
-      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
-      if (dummy_chunk == NULL) {
-        break;
-      }
-      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
-
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
-                               sm->sum_count_in_chunks_in_use());
-        dummy_chunk->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
-                               sm->chunk_manager()->free_chunks_total_words(),
-                               sm->chunk_manager()->free_chunks_count());
-      }
-    }
-  } else {
-    Metadebug::inc_deallocate_chunk_a_lot_count();
-  }
-#endif
-}
-
-void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
-                                       size_t raw_word_size){
-#ifdef ASSERT
-  if (MetaDataDeallocateALot &&
-        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::set_deallocate_block_a_lot_count(0);
-    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
-      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
-      if (dummy_block == 0) {
-        break;
-      }
-      sm->deallocate(dummy_block, raw_word_size);
-    }
-  } else {
-    Metadebug::inc_deallocate_block_a_lot_count();
-  }
-#endif
-}
-
 void Metadebug::init_allocation_fail_alot_count() {
   if (MetadataAllocationFailALot) {
     _allocation_fail_alot_count =
@@ -1664,31 +1687,6 @@
   return free_chunks(index);
 }
 
-void ChunkManager::free_chunks_put(Metachunk* chunk) {
-  assert_lock_strong(SpaceManager::expand_lock());
-  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
-  chunk->set_next(free_list->head());
-  free_list->set_head(chunk);
-  // chunk is being returned to the chunk free list
-  inc_free_chunks_total(chunk->capacity_word_size());
-  slow_locked_verify();
-}
-
-void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
-  // The deallocation of a chunk originates in the freelist
-  // manangement code for a Metaspace and does not hold the
-  // lock.
-  assert(chunk != NULL, "Deallocating NULL");
-  assert_lock_strong(SpaceManager::expand_lock());
-  slow_locked_verify();
-  if (TraceMetadataChunkAllocation) {
-    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
-                           PTR_FORMAT " size " SIZE_FORMAT,
-                           chunk, chunk->word_size());
-  }
-  free_chunks_put(chunk);
-}
-
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
 
@@ -1700,7 +1698,6 @@
     assert(free_list != NULL, "Sanity check");
 
     chunk = free_list->head();
-    debug_only(Metachunk* debug_head = chunk;)
 
     if (chunk == NULL) {
       return NULL;
@@ -1709,9 +1706,6 @@
 
     // Remove the chunk as the head of the list.
     free_list->remove_chunk(chunk);
 
-    // Chunk is being removed from the chunks free list.
-    dec_free_chunks_total(chunk->capacity_word_size());
-
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
@@ -1722,28 +1716,29 @@
                                       word_size,
                                       FreeBlockDictionary<Metachunk>::atLeast);
 
-    if (chunk != NULL) {
-      if (TraceMetadataHumongousAllocation) {
-        size_t waste = chunk->word_size() - word_size;
-        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
-                               SIZE_FORMAT " for requested size " SIZE_FORMAT
-                               " waste " SIZE_FORMAT,
-                               chunk->word_size(), word_size, waste);
-      }
-      // Chunk is being removed from the chunks free list.
-      dec_free_chunks_total(chunk->capacity_word_size());
-    } else {
+    if (chunk == NULL) {
      return NULL;
    }
+
+    if (TraceMetadataHumongousAllocation) {
+      size_t waste = chunk->word_size() - word_size;
+      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                             SIZE_FORMAT " for requested size " SIZE_FORMAT
+                             " waste " SIZE_FORMAT,
+                             chunk->word_size(), word_size, waste);
+    }
  }
 
+  // Chunk is being removed from the chunks free list.
+  dec_free_chunks_total(chunk->word_size());
+
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
 #ifdef ASSERT
   // Chunk is no longer on any freelist. Setting to false make container_count_slow()
   // work.
-  chunk->set_is_free(false);
+  chunk->set_is_tagged_free(false);
 #endif
   chunk->container()->inc_container_count();
 
@@ -1875,7 +1870,7 @@
   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
     Metachunk* chunk = chunks_in_use(i);
     while (chunk != NULL) {
-      sum += chunk->capacity_word_size();
+      sum += chunk->word_size();
      chunk = chunk->next();
    }
  }
@@ -1951,12 +1946,12 @@
    chunk_word_size = medium_chunk_size();
  }
 
-  // Might still need a humongous chunk.  Enforce an
-  // eight word granularity to facilitate reuse (some
-  // wastage but better chance of reuse).
+  // Might still need a humongous chunk.  Enforce
+  // humongous allocations sizes to be aligned up to
+  // the smallest chunk size.
  size_t if_humongous_sized_chunk =
    align_size_up(word_size + Metachunk::overhead(),
-                  HumongousChunkGranularity);
+                  smallest_chunk_size());
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
 
@@ -1977,6 +1972,15 @@
  return chunk_word_size;
}
 
+void SpaceManager::track_metaspace_memory_usage() {
+  if (is_init_completed()) {
+    if (is_class()) {
+      MemoryService::track_compressed_class_memory_usage();
+    }
+    MemoryService::track_metaspace_memory_usage();
+  }
+}
+
 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
   assert(vs_list()->current_virtual_space() != NULL,
          "Should have been set");
@@ -2002,15 +2006,20 @@
   size_t grow_chunks_by_words = calc_chunk_size(word_size);
   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
 
+  MetaWord* mem = NULL;
+
   // If a chunk was available, add it to the in-use chunk list
   // and do an allocation from it.
   if (next != NULL) {
-    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
     // Add to this manager's list of chunks in use.
     add_chunk(next, false);
-    return next->allocate(word_size);
+    mem = next->allocate(word_size);
   }
-  return NULL;
+
+  // Track metaspace memory usage statistic.
+  track_metaspace_memory_usage();
+
+  return mem;
 }
 
 void SpaceManager::print_on(outputStream* st) const {
@@ -2105,7 +2114,7 @@
       // Capture the next link before it is changed
      // by the call to return_chunk_at_head();
      Metachunk* next = cur->next();
-      cur->set_is_free(true);
+      DEBUG_ONLY(cur->set_is_tagged_free(true);)
      list->return_chunk_at_head(cur);
      cur = next;
    }
@@ -2177,7 +2186,7 @@
 
  while (humongous_chunks != NULL) {
#ifdef ASSERT
-    humongous_chunks->set_is_free(true);
+    humongous_chunks->set_is_tagged_free(true);
#endif
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
@@ -2186,10 +2195,10 @@
    }
    assert(humongous_chunks->word_size() == (size_t)
           align_size_up(humongous_chunks->word_size(),
-                             HumongousChunkGranularity),
+                             smallest_chunk_size()),
           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                   " granularity %d",
-                   humongous_chunks->word_size(), HumongousChunkGranularity));
+                   humongous_chunks->word_size(), smallest_chunk_size()));
    Metachunk* next_humongous_chunks = humongous_chunks->next();
    humongous_chunks->container()->dec_container_count();
    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
@@ -2341,7 +2350,6 @@
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }
-  Metadebug::deallocate_block_a_lot(this, raw_word_size);
 
  return p;
}
@@ -2366,6 +2374,7 @@
    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
+
  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }
@@ -2373,7 +2382,8 @@
  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }
-  if (result != 0) {
+
+  if (result != NULL) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
@@ -2438,7 +2448,7 @@
        curr->print_on(out);
        curr_total += curr->word_size();
        used += curr->used_word_size();
-        capacity += curr->capacity_word_size();
+        capacity += curr->word_size();
        waste += curr->free_word_size() + curr->overhead();;
      }
    }
@@ -2639,24 +2649,26 @@
 void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;
 
-  out->print_cr(" Metaspace total "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
-
-  out->print_cr("  data space     "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(nct)/K,
-                allocated_used_bytes(nct)/K,
-                reserved_bytes(nct)/K);
+  out->print_cr(" Metaspace       "
+                "used "      SIZE_FORMAT "K, "
+                "capacity "  SIZE_FORMAT "K, "
+                "committed " SIZE_FORMAT "K, "
+                "reserved "  SIZE_FORMAT "K",
+                allocated_used_bytes()/K,
+                allocated_capacity_bytes()/K,
+                committed_bytes()/K,
+                reserved_bytes()/K);
+
  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
-                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                  " reserved " SIZE_FORMAT "K",
+                  "used "      SIZE_FORMAT "K, "
+                  "capacity "  SIZE_FORMAT "K, "
+                  "committed " SIZE_FORMAT "K, "
+                  "reserved "  SIZE_FORMAT "K",
+                  allocated_used_bytes(ct)/K,
                  allocated_capacity_bytes(ct)/K,
-                  allocated_used_bytes(ct)/K,
+                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}
@@ -2808,6 +2820,9 @@
 size_t Metaspace::_first_chunk_word_size = 0;
 size_t Metaspace::_first_class_chunk_word_size = 0;
 
+size_t Metaspace::_commit_alignment = 0;
+size_t Metaspace::_reserve_alignment = 0;
+
 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}
@@ -2828,6 +2843,8 @@
 
#define VIRTUALSPACEMULTIPLIER 2
 
#ifdef _LP64
+static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
+
 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
@@ -2837,14 +2854,22 @@
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                          (address)(metaspace_base + class_metaspace_size()));
+                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
-    higher_address = metaspace_base + class_metaspace_size();
+    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;
+
+    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
+    // If compressed class space fits in lower 32G, we don't need a base.
+    if (higher_address <= (address)klass_encoding_max) {
+      lower_base = 0; // effectively lower base is zero.
+    }
  }
+
  Universe::set_narrow_klass_base(lower_base);
-  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+
+  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
@@ -2859,31 +2884,40 @@
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                                (address)(metaspace_base + class_metaspace_size()));
-  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+                                (address)(metaspace_base + compressed_class_space_size()));
+  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
-  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
-
-  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                             os::vm_allocation_granularity(),
-                                             false, requested_addr, 0);
+  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
+  assert_is_ptr_aligned(cds_base, _reserve_alignment);
+  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
+
+  // Don't use large pages for the class space.
+  bool large_pages = false;
+
+  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                             _reserve_alignment,
+                                             large_pages,
+                                             requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
     if (UseSharedSpaces) {
+      size_t increment = align_size_up(1*G, _reserve_alignment);
+
       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow
       // use of CDS with compressed klass pointers.
       char *addr = requested_addr;
-      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
-             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
-        addr = addr + 1*G;
-        metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                     os::vm_allocation_granularity(), false, addr, 0);
+      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
+        addr = addr + increment;
+        metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                     _reserve_alignment, large_pages, addr, 0);
      }
    }
 
@@ -2893,11 +2927,11 @@
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
-      metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                   os::vm_allocation_granularity(), false);
+      metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
-                                              class_metaspace_size()));
+                                              compressed_class_space_size()));
      }
    }
  }
@@ -2919,8 +2953,8 @@
  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
-    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
-                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
  }
}
 
@@ -2933,34 +2967,91 @@
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+
+  if (!_class_space_list->initialization_succeeded()) {
+    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
+  }
}
 
#endif
 
+void Metaspace::ergo_initialize() {
+  if (DumpSharedSpaces) {
+    // Using large pages when dumping the shared archive is currently not implemented.
+    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
+  }
+
+  size_t page_size = os::vm_page_size();
+  if (UseLargePages && UseLargePagesInMetaspace) {
+    page_size = os::large_page_size();
+  }
+
+  _commit_alignment  = page_size;
+  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+
+  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
+  // override if MaxMetaspaceSize was set on the command line or not.
+  // This information is needed later to conform to the specification of the
+  // java.lang.management.MemoryUsage API.
+  //
+  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
+  // globals.hpp to the aligned value, but this is not possible, since the
+  // alignment depends on other flags being parsed.
+  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
+
+  if (MetaspaceSize > MaxMetaspaceSize) {
+    MetaspaceSize = MaxMetaspaceSize;
+  }
+
+  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
+
+  if (MetaspaceSize < 256*K) {
+    vm_exit_during_initialization("Too small initial Metaspace size");
+  }
+
+  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
+
+  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
+  set_compressed_class_space_size(CompressedClassSpaceSize);
+}
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
   size_t cds_total = 0;
 
-  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
-                                         os::vm_allocation_granularity()));
-
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
-    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
+    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
-    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
-    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
+    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
+    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
+    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
+    }
+
#ifdef _LP64
+    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
+      vm_exit_during_initialization("Unable to dump shared archive.",
+          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
+                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
+                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
+                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
+    }
+
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
@@ -2971,9 +3062,6 @@
                             _space_list->current_virtual_space()->bottom());
    }
 
-    // Set the shift to zero.
-    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
-           "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif
 
@@ -2992,12 +3080,12 @@
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
+        cds_total = FileMapInfo::shared_spaces_size();
+        cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
-      cds_total = FileMapInfo::shared_spaces_size();
-      cds_address = (address)mapinfo->region_base(0);
    }
 
#ifdef _LP64
@@ -3005,9 +3093,12 @@
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
-        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+        char* cds_end = (char*)(cds_address + cds_total);
+        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      } else {
-        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(base, 0);
      }
    }
#endif
@@ -3023,11 +3114,19 @@
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
-    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
+    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+
    // Initialize the list of virtual spaces.
@@ -3023,11 +3114,19 @@
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
-    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
+    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
+    }
  }
+
+  MetaspaceGC::initialize();
}

Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
@@ -3039,7 +3138,7 @@
    return chunk;
  }

-  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
@@ -3112,19 +3211,18 @@
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
-  MetaWord* result;
-  MetaspaceGC::set_expand_after_GC(true);
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
-  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+  assert(delta_bytes > 0, "Must be");
+
+  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t before_inc = after_inc - delta_bytes;
+
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
+      " to " SIZE_FORMAT, before_inc, after_inc);
  }

-  result = allocate(word_size, mdtype);
-
-  return result;
+  return allocate(word_size, mdtype);
}

// Space allocated in the Metaspace.  This may
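The rewrite of expand_and_allocate() above depends on MetaspaceGC::inc_capacity_until_GC() returning the capacity *after* the increment, so the "before" value can be derived without a second, racy read of _capacity_until_GC (now a volatile intptr_t). A self-contained sketch of that pattern, with std::atomic standing in for HotSpot's Atomic class and illustrative names:

#include <atomic>
#include <cstddef>

static std::atomic<size_t> capacity_until_gc{0};

// Returns the capacity after adding delta_bytes. The caller computes
// the "before" value as (after - delta), so the log line in
// expand_and_allocate() sees a consistent before/after pair even if
// other threads bump the capacity concurrently.
static size_t inc_capacity_until_gc(size_t delta_bytes) {
  return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
}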
@@ -3206,69 +3304,108 @@
  }
}

-Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
+
+MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

-  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
-
-  // SSS: Should we align the allocations and make sure the sizes are aligned.
-  MetaWord* result = NULL;
-
  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
+
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
-    result = space->allocate(word_size, NonClassType);
+    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
-    } else {
-      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }
-    return Metablock::initialize(result, word_size);
+
+    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+
+    // Zero initialize.
+    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+    return result;
  }

-  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
+  // Try to allocate metadata.
+  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+
+  if (result == NULL) {
+    // Allocation failed.
+    if (is_init_completed()) {
+      // Only start a GC if the bootstrapping has completed.
+
+      // Try to clean out some memory and retry.
+      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
+          loader_data, word_size, mdtype);
+    }
+  }

  if (result == NULL) {
-    // Try to clean out some memory and retry.
-    result =
-      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
-        loader_data, word_size, mdtype);
-
-    // If result is still null, we are out of memory.
-    if (result == NULL) {
-      if (Verbose && TraceMetadataChunkAllocation) {
-        gclog_or_tty->print_cr("Metaspace allocation failed for size "
-          SIZE_FORMAT, word_size);
-        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
-        MetaspaceAux::dump(gclog_or_tty);
-      }
-      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
-                                                                     "Metadata space";
-      report_java_out_of_memory(space_string);
-
-      if (JvmtiExport::should_post_resource_exhausted()) {
-        JvmtiExport::post_resource_exhausted(
-            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
-            space_string);
-      }
-      if (is_class_space_allocation(mdtype)) {
-        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
-      } else {
-        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
-      }
-    }
+    report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
  }
-  return Metablock::initialize(result, word_size);
+
+  // Zero initialize.
+  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+  return result;
+}
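The allocation path above now reads: fast-path allocate, one GC-assisted retry (only after VM bootstrap has completed), a single OOME report, then zero-fill. A compilable, simplified sketch of that control flow; every helper below is a stand-in, not a HotSpot API.

#include <cstddef>
#include <cstring>

// Stand-ins for the VM services used by Metaspace::allocate().
static bool  is_bootstrap_done()     { return true;    }
static void* fast_allocate(size_t)   { return nullptr; }
static void* gc_and_retry(size_t)    { return nullptr; }
static void* report_oome_and_fail()  { return nullptr; } // the VM throws here instead

static void* allocate_metadata(size_t byte_size) {
  void* result = fast_allocate(byte_size);
  if (result == nullptr && is_bootstrap_done()) {
    // Only start a GC once bootstrapping has completed, then retry once.
    result = gc_and_retry(byte_size);
  }
  if (result == nullptr) {
    return report_oome_and_fail();
  }
  std::memset(result, 0, byte_size);  // zero-initialize, as the new code does
  return result;
}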
+ "Compressed class space" : "Metaspace"; + + report_java_out_of_memory(space_string); + + if (JvmtiExport::should_post_resource_exhausted()) { + JvmtiExport::post_resource_exhausted( + JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, + space_string); + } + + if (!is_init_completed()) { + vm_exit_during_initialization("OutOfMemoryError", space_string); + } + + if (out_of_compressed_class_space) { + THROW_OOP(Universe::out_of_memory_error_class_metaspace()); + } else { + THROW_OOP(Universe::out_of_memory_error_metaspace()); + } } void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { @@ -3420,4 +3557,94 @@ TestMetaspaceAuxTest::test(); } +class TestVirtualSpaceNodeTest { + static void chunk_up(size_t words_left, size_t& num_medium_chunks, + size_t& num_small_chunks, + size_t& num_specialized_chunks) { + num_medium_chunks = words_left / MediumChunk; + words_left = words_left % MediumChunk; + + num_small_chunks = words_left / SmallChunk; + words_left = words_left % SmallChunk; + // how many specialized chunks can we get? + num_specialized_chunks = words_left / SpecializedChunk; + assert(words_left % SpecializedChunk == 0, "should be nothing left"); + } + + public: + static void test() { + MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); + const size_t vsn_test_size_words = MediumChunk * 4; + const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; + + // The chunk sizes must be multiples of eachother, or this will fail + STATIC_ASSERT(MediumChunk % SmallChunk == 0); + STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); + + { // No committed memory in VSN + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + vsn.initialize(); + vsn.retire(&cm); + assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); + } + + { // All of VSN is committed, half is used by chunks + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + vsn.initialize(); + vsn.expand_by(vsn_test_size_words, vsn_test_size_words); + vsn.get_chunk_vs(MediumChunk); + vsn.get_chunk_vs(MediumChunk); + vsn.retire(&cm); + assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); + assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); + } + + { // 4 pages of VSN is committed, some is used by chunks + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; + assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size"); + vsn.initialize(); + vsn.expand_by(page_chunks, page_chunks); + vsn.get_chunk_vs(SmallChunk); + vsn.get_chunk_vs(SpecializedChunk); + vsn.retire(&cm); + + // committed - used = words left to retire + const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; + + size_t num_medium_chunks, num_small_chunks, num_spec_chunks; + chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); + + assert(num_medium_chunks == 0, "should not get any medium chunks"); + assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); + assert(cm.sum_free_chunks() == words_left, "sizes should add up"); + } + + { // Half of VSN is committed, a humongous chunk is used + ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); + VirtualSpaceNode vsn(vsn_test_size_bytes); + 
+    { // Half of VSN is committed, a humongous chunk is used
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
+      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
+      vsn.retire(&cm);
+
+      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+  }
+};
+
+void TestVirtualSpaceNode_test() {
+  TestVirtualSpaceNodeTest::test();
+}
+
#endif
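For the humongous-chunk case just above, the same decomposition can be checked numerically (again with the assumed chunk sizes in words from the enum at the top of the file; a hedged side calculation, not part of the VM test):

#include <cassert>
#include <cstddef>

int main() {
  const size_t SpecializedChunk = 128, SmallChunk = 512, MediumChunk = 8 * 1024;

  // Committed: 2 medium chunks. Used: one humongous chunk, aligned up to
  // MediumChunk + SpecializedChunk per the comment in the test.
  size_t left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);  // 8064 words

  const size_t num_medium = left / MediumChunk;       left %= MediumChunk;       // 0
  const size_t num_small  = left / SmallChunk;        left %= SmallChunk;        // 15
  const size_t num_spec   = left / SpecializedChunk;  left %= SpecializedChunk;  // 3
  assert(left == 0 && num_medium == 0 && num_small == 15 && num_spec == 3);
  return 0;
}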