comparison src/share/vm/memory/metaspace.cpp @ 13102:f9f4503a4ab5

Merge
author Christos Kotselidis <christos.kotselidis@oracle.com>
date Thu, 21 Nov 2013 15:04:54 +0100
parents 11b116661830
children fa76dce60db7 ce86c36b8921
comparing 13101:790ebab62d23 with 13102:f9f4503a4ab5
27 #include "memory/binaryTreeDictionary.hpp" 27 #include "memory/binaryTreeDictionary.hpp"
28 #include "memory/freeList.hpp" 28 #include "memory/freeList.hpp"
29 #include "memory/collectorPolicy.hpp" 29 #include "memory/collectorPolicy.hpp"
30 #include "memory/filemap.hpp" 30 #include "memory/filemap.hpp"
31 #include "memory/freeList.hpp" 31 #include "memory/freeList.hpp"
32 #include "memory/metablock.hpp" 32 #include "memory/gcLocker.hpp"
33 #include "memory/metachunk.hpp" 33 #include "memory/metachunk.hpp"
34 #include "memory/metaspace.hpp" 34 #include "memory/metaspace.hpp"
35 #include "memory/metaspaceShared.hpp" 35 #include "memory/metaspaceShared.hpp"
36 #include "memory/resourceArea.hpp" 36 #include "memory/resourceArea.hpp"
37 #include "memory/universe.hpp" 37 #include "memory/universe.hpp"
38 #include "runtime/atomic.inline.hpp"
38 #include "runtime/globals.hpp" 39 #include "runtime/globals.hpp"
40 #include "runtime/init.hpp"
39 #include "runtime/java.hpp" 41 #include "runtime/java.hpp"
40 #include "runtime/mutex.hpp" 42 #include "runtime/mutex.hpp"
41 #include "runtime/orderAccess.hpp" 43 #include "runtime/orderAccess.hpp"
42 #include "services/memTracker.hpp" 44 #include "services/memTracker.hpp"
45 #include "services/memoryService.hpp"
43 #include "utilities/copy.hpp" 46 #include "utilities/copy.hpp"
44 #include "utilities/debug.hpp" 47 #include "utilities/debug.hpp"
45 48
46 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary; 49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
47 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary; 50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
48 // Define this macro to enable slow integrity checking of 51
49 // the free chunk lists 52 // Set this constant to enable slow integrity checking of the free chunk lists
50 const bool metaspace_slow_verify = false; 53 const bool metaspace_slow_verify = false;
51 54
52 // Parameters for stress mode testing
53 const uint metadata_deallocate_a_lot_block = 10;
54 const uint metadata_deallocate_a_lock_chunk = 3;
55 size_t const allocation_from_dictionary_limit = 4 * K; 55 size_t const allocation_from_dictionary_limit = 4 * K;
56 56
57 MetaWord* last_allocated = 0; 57 MetaWord* last_allocated = 0;
58 58
59 size_t Metaspace::_class_metaspace_size; 59 size_t Metaspace::_compressed_class_space_size;
60 60
61 // Used in declarations in SpaceManager and ChunkManager 61 // Used in declarations in SpaceManager and ChunkManager
62 enum ChunkIndex { 62 enum ChunkIndex {
63 ZeroIndex = 0, 63 ZeroIndex = 0,
64 SpecializedIndex = ZeroIndex, 64 SpecializedIndex = ZeroIndex,
73 ClassSpecializedChunk = 128, 73 ClassSpecializedChunk = 128,
74 SpecializedChunk = 128, 74 SpecializedChunk = 128,
75 ClassSmallChunk = 256, 75 ClassSmallChunk = 256,
76 SmallChunk = 512, 76 SmallChunk = 512,
77 ClassMediumChunk = 4 * K, 77 ClassMediumChunk = 4 * K,
78 MediumChunk = 8 * K, 78 MediumChunk = 8 * K
79 HumongousChunkGranularity = 8
80 }; 79 };
81 80
82 static ChunkIndex next_chunk_index(ChunkIndex i) { 81 static ChunkIndex next_chunk_index(ChunkIndex i) {
83 assert(i < NumberOfInUseLists, "Out of bound"); 82 assert(i < NumberOfInUseLists, "Out of bound");
84 return (ChunkIndex) (i+1); 83 return (ChunkIndex) (i+1);
85 } 84 }
86 85
87 // Originally _capacity_until_GC was set to MetaspaceSize here but 86 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
88 // the default MetaspaceSize before argument processing was being
89 // used which was not the desired value. See the code
90 // in should_expand() to see how the initialization is handled
91 // now.
92 size_t MetaspaceGC::_capacity_until_GC = 0;
93 bool MetaspaceGC::_expand_after_GC = false;
94 uint MetaspaceGC::_shrink_factor = 0; 87 uint MetaspaceGC::_shrink_factor = 0;
95 bool MetaspaceGC::_should_concurrent_collect = false; 88 bool MetaspaceGC::_should_concurrent_collect = false;
96 89
97 // Blocks of space for metadata are allocated out of Metachunks.
98 //
99 // Metachunk are allocated out of MetadataVirtualspaces and once
100 // allocated there is no explicit link between a Metachunk and
101 // the MetadataVirtualspaces from which it was allocated.
102 //
103 // Each SpaceManager maintains a
104 // list of the chunks it is using and the current chunk. The current
105 // chunk is the chunk from which allocations are done. Space freed in
106 // a chunk is placed on the free list of blocks (BlockFreelist) and
107 // reused from there.
108
109 typedef class FreeList<Metachunk> ChunkList; 90 typedef class FreeList<Metachunk> ChunkList;
110 91
111 // Manages the global free lists of chunks. 92 // Manages the global free lists of chunks.
112 // Has three lists of free chunks, and a total size and
113 // count that includes all three
114
115 class ChunkManager : public CHeapObj<mtInternal> { 93 class ChunkManager : public CHeapObj<mtInternal> {
94 friend class TestVirtualSpaceNodeTest;
116 95
117 // Free list of chunks of different sizes. 96 // Free list of chunks of different sizes.
118 // SpecializedChunk 97 // SpecializedChunk
119 // SmallChunk 98 // SmallChunk
120 // MediumChunk 99 // MediumChunk
121 // HumongousChunk 100 // HumongousChunk
122 ChunkList _free_chunks[NumberOfFreeLists]; 101 ChunkList _free_chunks[NumberOfFreeLists];
123
124 102
125 // HumongousChunk 103 // HumongousChunk
126 ChunkTreeDictionary _humongous_dictionary; 104 ChunkTreeDictionary _humongous_dictionary;
127 105
128 // ChunkManager in all lists of this type 106 // ChunkManager in all lists of this type
166 _free_chunks[MediumIndex].set_size(medium_size); 144 _free_chunks[MediumIndex].set_size(medium_size);
167 } 145 }
168 146
169 // add or delete (return) a chunk to the global freelist. 147 // add or delete (return) a chunk to the global freelist.
170 Metachunk* chunk_freelist_allocate(size_t word_size); 148 Metachunk* chunk_freelist_allocate(size_t word_size);
171 void chunk_freelist_deallocate(Metachunk* chunk);
172 149
173 // Map a size to a list index assuming that there are lists 150 // Map a size to a list index assuming that there are lists
174 // for special, small, medium, and humongous chunks. 151 // for special, small, medium, and humongous chunks.
175 static ChunkIndex list_index(size_t size); 152 static ChunkIndex list_index(size_t size);
176 153
200 ChunkList* free_chunks(ChunkIndex index); 177 ChunkList* free_chunks(ChunkIndex index);
201 178
202 // Returns the list for the given chunk word size. 179 // Returns the list for the given chunk word size.
203 ChunkList* find_free_chunks_list(size_t word_size); 180 ChunkList* find_free_chunks_list(size_t word_size);
204 181
205 // Add and remove from a list by size. Selects 182 // Remove from a list by size. Selects list based on size of chunk.
206 // list based on size of chunk.
207 void free_chunks_put(Metachunk* chuck);
208 Metachunk* free_chunks_get(size_t chunk_word_size); 183 Metachunk* free_chunks_get(size_t chunk_word_size);
209 184
210 // Debug support 185 // Debug support
211 void verify(); 186 void verify();
212 void slow_verify() { 187 void slow_verify() {
230 205
231 // Used to manage the free list of Metablocks (a block corresponds 206 // Used to manage the free list of Metablocks (a block corresponds
232 // to the allocation of a quantum of metadata). 207 // to the allocation of a quantum of metadata).
233 class BlockFreelist VALUE_OBJ_CLASS_SPEC { 208 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
234 BlockTreeDictionary* _dictionary; 209 BlockTreeDictionary* _dictionary;
235 static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
236 210
237 // Only allocate and split from freelist if the size of the allocation 211 // Only allocate and split from freelist if the size of the allocation
238 // is at least 1/4th the size of the available block. 212 // is at least 1/4th the size of the available block.
239 const static int WasteMultiplier = 4; 213 const static int WasteMultiplier = 4;
240 214
258 } 232 }
259 233
260 void print_on(outputStream* st) const; 234 void print_on(outputStream* st) const;
261 }; 235 };
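
As an aside on the WasteMultiplier policy noted above: a free block is only taken from the dictionary when the request is at least 1/4 of the block's size, so at most 3/4 of a block can be wasted by splitting it. A minimal illustrative sketch of that check (the helper name is hypothetical; the equivalent test lives in BlockFreelist::get_block() in this file):

    #include <cstddef>

    // Illustrative only: worth_taking_from_freelist is a hypothetical helper;
    // BlockFreelist::get_block() performs the equivalent check in metaspace.cpp.
    static bool worth_taking_from_freelist(std::size_t block_word_size,
                                           std::size_t requested_word_size) {
      const std::size_t WasteMultiplier = 4;  // same constant as in the class above
      // Use the block only if the request is at least 1/4 of it.
      return requested_word_size * WasteMultiplier >= block_word_size;
    }
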
262 236
237 // A VirtualSpaceList node.
263 class VirtualSpaceNode : public CHeapObj<mtClass> { 238 class VirtualSpaceNode : public CHeapObj<mtClass> {
264 friend class VirtualSpaceList; 239 friend class VirtualSpaceList;
265 240
266 // Link to next VirtualSpaceNode 241 // Link to next VirtualSpaceNode
267 VirtualSpaceNode* _next; 242 VirtualSpaceNode* _next;
280 255
281 // The first Metachunk will be allocated at the bottom of the 256 // The first Metachunk will be allocated at the bottom of the
282 // VirtualSpace 257 // VirtualSpace
283 Metachunk* first_chunk() { return (Metachunk*) bottom(); } 258 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
284 259
260 // Committed but unused space in the virtual space
261 size_t free_words_in_vs() const;
285 public: 262 public:
286 263
287 VirtualSpaceNode(size_t byte_size); 264 VirtualSpaceNode(size_t byte_size);
288 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} 265 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
289 ~VirtualSpaceNode(); 266 ~VirtualSpaceNode();
291 // Convenience functions for logical bottom and end 268 // Convenience functions for logical bottom and end
292 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } 269 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
293 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 270 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
294 271
295 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } 272 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
296 size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; }
297 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } 273 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
274
275 bool is_pre_committed() const { return _virtual_space.special(); }
298 276
299 // address of next available space in _virtual_space; 277 // address of next available space in _virtual_space;
300 // Accessors 278 // Accessors
301 VirtualSpaceNode* next() { return _next; } 279 VirtualSpaceNode* next() { return _next; }
302 void set_next(VirtualSpaceNode* v) { _next = v; } 280 void set_next(VirtualSpaceNode* v) { _next = v; }
323 #endif 301 #endif
324 302
325 // used and capacity in this single entry in the list 303 // used and capacity in this single entry in the list
326 size_t used_words_in_vs() const; 304 size_t used_words_in_vs() const;
327 size_t capacity_words_in_vs() const; 305 size_t capacity_words_in_vs() const;
328 size_t free_words_in_vs() const;
329 306
330 bool initialize(); 307 bool initialize();
331 308
332 // get space from the virtual space 309 // get space from the virtual space
333 Metachunk* take_from_committed(size_t chunk_word_size); 310 Metachunk* take_from_committed(size_t chunk_word_size);
335 // Allocate a chunk from the virtual space and return it. 312 // Allocate a chunk from the virtual space and return it.
336 Metachunk* get_chunk_vs(size_t chunk_word_size); 313 Metachunk* get_chunk_vs(size_t chunk_word_size);
337 314
338 // Expands/shrinks the committed space in a virtual space. Delegates 315 // Expands/shrinks the committed space in a virtual space. Delegates
339 // to Virtualspace 316 // to Virtualspace
340 bool expand_by(size_t words, bool pre_touch = false); 317 bool expand_by(size_t min_words, size_t preferred_words);
341 318
342 // In preparation for deleting this node, remove all the chunks 319 // In preparation for deleting this node, remove all the chunks
343 // in the node from any freelist. 320 // in the node from any freelist.
344 void purge(ChunkManager* chunk_manager); 321 void purge(ChunkManager* chunk_manager);
322
323 // If an allocation doesn't fit in the current node a new node is created.
324 // Allocate chunks out of the remaining committed space in this node
325 // to avoid wasting that memory.
326 // This always adds up because all the chunk sizes are multiples of
327 // the smallest chunk size.
328 void retire(ChunkManager* chunk_manager);
345 329
346 #ifdef ASSERT 330 #ifdef ASSERT
347 // Debug support 331 // Debug support
348 void mangle(); 332 void mangle();
349 #endif 333 #endif
350 334
351 void print_on(outputStream* st) const; 335 void print_on(outputStream* st) const;
352 }; 336 };
353 337
338 #define assert_is_ptr_aligned(ptr, alignment) \
339 assert(is_ptr_aligned(ptr, alignment), \
340 err_msg(PTR_FORMAT " is not aligned to " \
341 SIZE_FORMAT, ptr, alignment))
342
343 #define assert_is_size_aligned(size, alignment) \
344 assert(is_size_aligned(size, alignment), \
345 err_msg(SIZE_FORMAT " is not aligned to " \
346 SIZE_FORMAT, size, alignment))
347
348
349 // Decide if large pages should be committed when the memory is reserved.
350 static bool should_commit_large_pages_when_reserving(size_t bytes) {
351 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
352 size_t words = bytes / BytesPerWord;
353 bool is_class = false; // We never reserve large pages for the class space.
354 if (MetaspaceGC::can_expand(words, is_class) &&
355 MetaspaceGC::allowed_expansion() >= words) {
356 return true;
357 }
358 }
359
360 return false;
361 }
362
354 // byte_size is the size of the associated virtualspace. 363 // byte_size is the size of the associated virtualspace.
355 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 364 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
356 // align up to vm allocation granularity 365 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
357 byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
358 366
359 // This allocates memory with mmap. For DumpSharedspaces, try to reserve 367 // This allocates memory with mmap. For DumpSharedspaces, try to reserve
360 // configurable address, generally at the top of the Java heap so other 368 // configurable address, generally at the top of the Java heap so other
361 // memory addresses don't conflict. 369 // memory addresses don't conflict.
362 if (DumpSharedSpaces) { 370 if (DumpSharedSpaces) {
363 char* shared_base = (char*)SharedBaseAddress; 371 bool large_pages = false; // No large pages when dumping the CDS archive.
364 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); 372 char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
373
374 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
365 if (_rs.is_reserved()) { 375 if (_rs.is_reserved()) {
366 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); 376 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
367 } else { 377 } else {
368 // Get a mmap region anywhere if the SharedBaseAddress fails. 378 // Get a mmap region anywhere if the SharedBaseAddress fails.
369 _rs = ReservedSpace(byte_size); 379 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
370 } 380 }
371 MetaspaceShared::set_shared_rs(&_rs); 381 MetaspaceShared::set_shared_rs(&_rs);
372 } else { 382 } else {
373 _rs = ReservedSpace(byte_size); 383 bool large_pages = should_commit_large_pages_when_reserving(bytes);
374 } 384
375 385 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
376 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 386 }
387
388 if (_rs.is_reserved()) {
389 assert(_rs.base() != NULL, "Catch if we get a NULL address");
390 assert(_rs.size() != 0, "Catch if we get a 0 size");
391 assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
392 assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
393
394 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
395 }
377 } 396 }
378 397
379 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { 398 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
380 Metachunk* chunk = first_chunk(); 399 Metachunk* chunk = first_chunk();
381 Metachunk* invalid_chunk = (Metachunk*) top(); 400 Metachunk* invalid_chunk = (Metachunk*) top();
382 while (chunk < invalid_chunk ) { 401 while (chunk < invalid_chunk ) {
383 assert(chunk->is_free(), "Should be marked free"); 402 assert(chunk->is_tagged_free(), "Should be tagged free");
384 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 403 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
385 chunk_manager->remove_chunk(chunk); 404 chunk_manager->remove_chunk(chunk);
386 assert(chunk->next() == NULL && 405 assert(chunk->next() == NULL &&
387 chunk->prev() == NULL, 406 chunk->prev() == NULL,
388 "Was not removed from its list"); 407 "Was not removed from its list");
389 chunk = (Metachunk*) next; 408 chunk = (Metachunk*) next;
390 } 409 }
391 } 410 }
392 411
393 #ifdef ASSERT 412 #ifdef ASSERT
394 uint VirtualSpaceNode::container_count_slow() { 413 uint VirtualSpaceNode::container_count_slow() {
398 while (chunk < invalid_chunk ) { 417 while (chunk < invalid_chunk ) {
399 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 418 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
400 // Don't count the chunks on the free lists. Those are 419 // Don't count the chunks on the free lists. Those are
401 // still part of the VirtualSpaceNode but not currently 420 // still part of the VirtualSpaceNode but not currently
402 // counted. 421 // counted.
403 if (!chunk->is_free()) { 422 if (!chunk->is_tagged_free()) {
404 count++; 423 count++;
405 } 424 }
406 chunk = (Metachunk*) next; 425 chunk = (Metachunk*) next;
407 } 426 }
408 return count; 427 return count;
409 } 428 }
410 #endif 429 #endif
411 430
412 // List of VirtualSpaces for metadata allocation. 431 // List of VirtualSpaces for metadata allocation.
413 // It has a _next link for singly linked list and a MemRegion
414 // for total space in the VirtualSpace.
415 class VirtualSpaceList : public CHeapObj<mtClass> { 432 class VirtualSpaceList : public CHeapObj<mtClass> {
416 friend class VirtualSpaceNode; 433 friend class VirtualSpaceNode;
417 434
418 enum VirtualSpaceSizes { 435 enum VirtualSpaceSizes {
419 VirtualSpaceSize = 256 * K 436 VirtualSpaceSize = 256 * K
420 }; 437 };
421 438
422 // Global list of virtual spaces
423 // Head of the list 439 // Head of the list
424 VirtualSpaceNode* _virtual_space_list; 440 VirtualSpaceNode* _virtual_space_list;
425 // virtual space currently being used for allocations 441 // virtual space currently being used for allocations
426 VirtualSpaceNode* _current_virtual_space; 442 VirtualSpaceNode* _current_virtual_space;
427 443
428 // Can this virtual list allocate >1 spaces? Also, used to determine 444 // Is this VirtualSpaceList used for the compressed class space
429 // whether to allocate unlimited small chunks in this virtual space
430 bool _is_class; 445 bool _is_class;
431 bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
432 446
433 // Sum of reserved and committed memory in the virtual spaces 447 // Sum of reserved and committed memory in the virtual spaces
434 size_t _reserved_words; 448 size_t _reserved_words;
435 size_t _committed_words; 449 size_t _committed_words;
436 450
451 void link_vs(VirtualSpaceNode* new_entry); 465 void link_vs(VirtualSpaceNode* new_entry);
452 466
453 // Get another virtual space and add it to the list. This 467 // Get another virtual space and add it to the list. This
454 // is typically prompted by a failed attempt to allocate a chunk 468 // is typically prompted by a failed attempt to allocate a chunk
455 // and is typically followed by the allocation of a chunk. 469 // and is typically followed by the allocation of a chunk.
456 bool grow_vs(size_t vs_word_size); 470 bool create_new_virtual_space(size_t vs_word_size);
471
472 // Chunk up the unused committed space in the current
473 // virtual space and add the chunks to the free list.
474 void retire_current_virtual_space();
457 475
458 public: 476 public:
459 VirtualSpaceList(size_t word_size); 477 VirtualSpaceList(size_t word_size);
460 VirtualSpaceList(ReservedSpace rs); 478 VirtualSpaceList(ReservedSpace rs);
461 479
463 481
464 Metachunk* get_new_chunk(size_t word_size, 482 Metachunk* get_new_chunk(size_t word_size,
465 size_t grow_chunks_by_words, 483 size_t grow_chunks_by_words,
466 size_t medium_chunk_bunch); 484 size_t medium_chunk_bunch);
467 485
468 bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); 486 bool expand_node_by(VirtualSpaceNode* node,
469 487 size_t min_words,
470 // Get the first chunk for a Metaspace. Used for 488 size_t preferred_words);
471 // special cases such as the boot class loader, reflection 489
472 // class loader and anonymous class loader. 490 bool expand_by(size_t min_words,
473 Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch); 491 size_t preferred_words);
474 492
475 VirtualSpaceNode* current_virtual_space() { 493 VirtualSpaceNode* current_virtual_space() {
476 return _current_virtual_space; 494 return _current_virtual_space;
477 } 495 }
478 496
479 bool is_class() const { return _is_class; } 497 bool is_class() const { return _is_class; }
480 498
481 // Allocate the first virtualspace. 499 bool initialization_succeeded() { return _virtual_space_list != NULL; }
482 void initialize(size_t word_size);
483 500
484 size_t reserved_words() { return _reserved_words; } 501 size_t reserved_words() { return _reserved_words; }
485 size_t reserved_bytes() { return reserved_words() * BytesPerWord; } 502 size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
486 size_t committed_words() { return _committed_words; } 503 size_t committed_words() { return _committed_words; }
487 size_t committed_bytes() { return committed_words() * BytesPerWord; } 504 size_t committed_bytes() { return committed_words() * BytesPerWord; }
520 }; 537 };
521 }; 538 };
522 539
523 class Metadebug : AllStatic { 540 class Metadebug : AllStatic {
524 // Debugging support for Metaspaces 541 // Debugging support for Metaspaces
525 static int _deallocate_block_a_lot_count;
526 static int _deallocate_chunk_a_lot_count;
527 static int _allocation_fail_alot_count; 542 static int _allocation_fail_alot_count;
528 543
529 public: 544 public:
530 static int deallocate_block_a_lot_count() {
531 return _deallocate_block_a_lot_count;
532 }
533 static void set_deallocate_block_a_lot_count(int v) {
534 _deallocate_block_a_lot_count = v;
535 }
536 static void inc_deallocate_block_a_lot_count() {
537 _deallocate_block_a_lot_count++;
538 }
539 static int deallocate_chunk_a_lot_count() {
540 return _deallocate_chunk_a_lot_count;
541 }
542 static void reset_deallocate_chunk_a_lot_count() {
543 _deallocate_chunk_a_lot_count = 1;
544 }
545 static void inc_deallocate_chunk_a_lot_count() {
546 _deallocate_chunk_a_lot_count++;
547 }
548 545
549 static void init_allocation_fail_alot_count(); 546 static void init_allocation_fail_alot_count();
550 #ifdef ASSERT 547 #ifdef ASSERT
551 static bool test_metadata_failure(); 548 static bool test_metadata_failure();
552 #endif 549 #endif
553
554 static void deallocate_chunk_a_lot(SpaceManager* sm,
555 size_t chunk_word_size);
556 static void deallocate_block_a_lot(SpaceManager* sm,
557 size_t chunk_word_size);
558
559 }; 550 };
560 551
561 int Metadebug::_deallocate_block_a_lot_count = 0;
562 int Metadebug::_deallocate_chunk_a_lot_count = 0;
563 int Metadebug::_allocation_fail_alot_count = 0; 552 int Metadebug::_allocation_fail_alot_count = 0;
564 553
565 // SpaceManager - used by Metaspace to handle allocations 554 // SpaceManager - used by Metaspace to handle allocations
566 class SpaceManager : public CHeapObj<mtClass> { 555 class SpaceManager : public CHeapObj<mtClass> {
567 friend class Metaspace; 556 friend class Metaspace;
645 }; 634 };
646 635
647 bool is_class() { return _mdtype == Metaspace::ClassType; } 636 bool is_class() { return _mdtype == Metaspace::ClassType; }
648 637
649 // Accessors 638 // Accessors
650 size_t specialized_chunk_size() { return SpecializedChunk; } 639 size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
651 size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; } 640 size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
652 size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; } 641 size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
653 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } 642 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
643
644 size_t smallest_chunk_size() { return specialized_chunk_size(); }
654 645
655 size_t allocated_blocks_words() const { return _allocated_blocks_words; } 646 size_t allocated_blocks_words() const { return _allocated_blocks_words; }
656 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } 647 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
657 size_t allocated_chunks_words() const { return _allocated_chunks_words; } 648 size_t allocated_chunks_words() const { return _allocated_chunks_words; }
658 size_t allocated_chunks_count() const { return _allocated_chunks_count; } 649 size_t allocated_chunks_count() const { return _allocated_chunks_count; }
706 // Called when an allocation from the current chunk fails. 697 // Called when an allocation from the current chunk fails.
707 // Gets a new chunk (may require getting a new virtual space), 698 // Gets a new chunk (may require getting a new virtual space),
708 // and allocates from that chunk. 699 // and allocates from that chunk.
709 MetaWord* grow_and_allocate(size_t word_size); 700 MetaWord* grow_and_allocate(size_t word_size);
710 701
702 // Notify memory usage to MemoryService.
703 void track_metaspace_memory_usage();
704
711 // debugging support. 705 // debugging support.
712 706
713 void dump(outputStream* const out) const; 707 void dump(outputStream* const out) const;
714 void print_on(outputStream* st) const; 708 void print_on(outputStream* st) const;
715 void locked_print_chunks_in_use_on(outputStream* st) const; 709 void locked_print_chunks_in_use_on(outputStream* st) const;
720 #ifdef ASSERT 714 #ifdef ASSERT
721 void verify_allocated_blocks_words(); 715 void verify_allocated_blocks_words();
722 #endif 716 #endif
723 717
724 size_t get_raw_word_size(size_t word_size) { 718 size_t get_raw_word_size(size_t word_size) {
725 // If only the dictionary is going to be used (i.e., no
726 // indexed free list), then there is a minimum size requirement.
727 // MinChunkSize is a placeholder for the real minimum size JJJ
728 size_t byte_size = word_size * BytesPerWord; 719 size_t byte_size = word_size * BytesPerWord;
729 720
730 size_t raw_bytes_size = MAX2(byte_size, 721 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
731 Metablock::min_block_byte_size()); 722 raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
732 raw_bytes_size = ARENA_ALIGN(raw_bytes_size); 723
733 size_t raw_word_size = raw_bytes_size / BytesPerWord; 724 size_t raw_word_size = raw_bytes_size / BytesPerWord;
734 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); 725 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
735 726
736 return raw_word_size; 727 return raw_word_size;
737 } 728 }
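
A worked example of the rounding the new get_raw_word_size() performs, under assumed values (an 8-byte word, 24 bytes as a stand-in for sizeof(Metablock), and 8-byte alignment as a stand-in for Metachunk::object_alignment()):

    #include <cstddef>

    static std::size_t align_up(std::size_t v, std::size_t alignment) {
      return (v + alignment - 1) & ~(alignment - 1);  // alignment is a power of two
    }

    // Sketch only; the constants below are assumptions for illustration.
    static std::size_t raw_word_size(std::size_t word_size) {
      const std::size_t bytes_per_word   = 8;    // assumed 64-bit word
      const std::size_t min_block_bytes  = 24;   // stand-in for sizeof(Metablock)
      const std::size_t object_alignment = 8;    // stand-in for Metachunk::object_alignment()
      std::size_t raw_bytes = word_size * bytes_per_word;
      if (raw_bytes < min_block_bytes) raw_bytes = min_block_bytes;
      raw_bytes = align_up(raw_bytes, object_alignment);
      return raw_bytes / bytes_per_word;           // e.g. word_size 1 -> 3 words
    }
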
780 } 771 }
781 delete _dictionary; 772 delete _dictionary;
782 } 773 }
783 } 774 }
784 775
785 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
786 Metablock* block = (Metablock*) p;
787 block->set_word_size(word_size);
788 block->set_prev(NULL);
789 block->set_next(NULL);
790
791 return block;
792 }
793
794 void BlockFreelist::return_block(MetaWord* p, size_t word_size) { 776 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
795 Metablock* free_chunk = initialize_free_chunk(p, word_size); 777 Metablock* free_chunk = ::new (p) Metablock(word_size);
796 if (dictionary() == NULL) { 778 if (dictionary() == NULL) {
797 _dictionary = new BlockTreeDictionary(); 779 _dictionary = new BlockTreeDictionary();
798 } 780 }
799 dictionary()->return_chunk(free_chunk); 781 dictionary()->return_chunk(free_chunk);
800 } 782 }
867 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { 849 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
868 // Bottom of the new chunk 850 // Bottom of the new chunk
869 MetaWord* chunk_limit = top(); 851 MetaWord* chunk_limit = top();
870 assert(chunk_limit != NULL, "Not safe to call this method"); 852 assert(chunk_limit != NULL, "Not safe to call this method");
871 853
854 // The virtual spaces are always expanded by the
855 // commit granularity to enforce the following condition.
856 // Without this the is_available check will not work correctly.
857 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
858 "The committed memory doesn't match the expanded memory.");
859
872 if (!is_available(chunk_word_size)) { 860 if (!is_available(chunk_word_size)) {
873 if (TraceMetadataChunkAllocation) { 861 if (TraceMetadataChunkAllocation) {
874 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); 862 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
875 // Dump some information about the virtual space that is nearly full 863 // Dump some information about the virtual space that is nearly full
876 print_on(gclog_or_tty); 864 print_on(gclog_or_tty);
886 return result; 874 return result;
887 } 875 }
888 876
889 877
890 // Expand the virtual space (commit more of the reserved space) 878 // Expand the virtual space (commit more of the reserved space)
891 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { 879 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
892 size_t bytes = words * BytesPerWord; 880 size_t min_bytes = min_words * BytesPerWord;
893 bool result = virtual_space()->expand_by(bytes, pre_touch); 881 size_t preferred_bytes = preferred_words * BytesPerWord;
894 if (TraceMetavirtualspaceAllocation && !result) { 882
895 gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " 883 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
896 "for byte size " SIZE_FORMAT, bytes); 884
897 virtual_space()->print_on(gclog_or_tty); 885 if (uncommitted < min_bytes) {
898 } 886 return false;
887 }
888
889 size_t commit = MIN2(preferred_bytes, uncommitted);
890 bool result = virtual_space()->expand_by(commit, false);
891
892 assert(result, "Failed to commit memory");
893
899 return result; 894 return result;
900 } 895 }
901 896
902 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { 897 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
903 assert_lock_strong(SpaceManager::expand_lock()); 898 assert_lock_strong(SpaceManager::expand_lock());
912 907
913 if (!_rs.is_reserved()) { 908 if (!_rs.is_reserved()) {
914 return false; 909 return false;
915 } 910 }
916 911
917 // An allocation out of this Virtualspace that is larger 912 // These are necessary restrictions to make sure that the virtual space always
918 // than an initial commit size can waste that initial committed 913 // grows in steps of Metaspace::commit_alignment(). If both base and size are
919 // space. 914 // aligned only the middle alignment of the VirtualSpace is used.
920 size_t committed_byte_size = 0; 915 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
921 bool result = virtual_space()->initialize(_rs, committed_byte_size); 916 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
917
918 // ReservedSpaces marked as special will have the entire memory
919 // pre-committed. Setting a committed size will make sure that
920 // committed_size and actual_committed_size agrees.
921 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
922
923 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
924 Metaspace::commit_alignment());
922 if (result) { 925 if (result) {
926 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
927 "Checking that the pre-committed memory was registered by the VirtualSpace");
928
923 set_top((MetaWord*)virtual_space()->low()); 929 set_top((MetaWord*)virtual_space()->low());
924 set_reserved(MemRegion((HeapWord*)_rs.base(), 930 set_reserved(MemRegion((HeapWord*)_rs.base(),
925 (HeapWord*)(_rs.base() + _rs.size()))); 931 (HeapWord*)(_rs.base() + _rs.size())));
926 932
927 assert(reserved()->start() == (HeapWord*) _rs.base(), 933 assert(reserved()->start() == (HeapWord*) _rs.base(),
974 void VirtualSpaceList::dec_reserved_words(size_t v) { 980 void VirtualSpaceList::dec_reserved_words(size_t v) {
975 assert_lock_strong(SpaceManager::expand_lock()); 981 assert_lock_strong(SpaceManager::expand_lock());
976 _reserved_words = _reserved_words - v; 982 _reserved_words = _reserved_words - v;
977 } 983 }
978 984
985 #define assert_committed_below_limit() \
986 assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
987 err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
988 " limit (MaxMetaspaceSize): " SIZE_FORMAT, \
989 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
990
979 void VirtualSpaceList::inc_committed_words(size_t v) { 991 void VirtualSpaceList::inc_committed_words(size_t v) {
980 assert_lock_strong(SpaceManager::expand_lock()); 992 assert_lock_strong(SpaceManager::expand_lock());
981 _committed_words = _committed_words + v; 993 _committed_words = _committed_words + v;
994
995 assert_committed_below_limit();
982 } 996 }
983 void VirtualSpaceList::dec_committed_words(size_t v) { 997 void VirtualSpaceList::dec_committed_words(size_t v) {
984 assert_lock_strong(SpaceManager::expand_lock()); 998 assert_lock_strong(SpaceManager::expand_lock());
985 _committed_words = _committed_words - v; 999 _committed_words = _committed_words - v;
1000
1001 assert_committed_below_limit();
986 } 1002 }
987 1003
988 void VirtualSpaceList::inc_virtual_space_count() { 1004 void VirtualSpaceList::inc_virtual_space_count() {
989 assert_lock_strong(SpaceManager::expand_lock()); 1005 assert_lock_strong(SpaceManager::expand_lock());
990 _virtual_space_count++; 1006 _virtual_space_count++;
1002 } else { 1018 } else {
1003 humongous_dictionary()->remove_chunk(chunk); 1019 humongous_dictionary()->remove_chunk(chunk);
1004 } 1020 }
1005 1021
1006 // Chunk is being removed from the chunks free list. 1022 // Chunk is being removed from the chunks free list.
1007 dec_free_chunks_total(chunk->capacity_word_size()); 1023 dec_free_chunks_total(chunk->word_size());
1008 } 1024 }
1009 1025
1010 // Walk the list of VirtualSpaceNodes and delete 1026 // Walk the list of VirtualSpaceNodes and delete
1011 // nodes with a 0 container_count. Remove Metachunks in 1027 // nodes with a 0 container_count. Remove Metachunks in
1012 // the node from their respective freelists. 1028 // the node from their respective freelists.
1023 // Don't free the current virtual space since it will likely 1039 // Don't free the current virtual space since it will likely
1024 // be needed soon. 1040 // be needed soon.
1025 if (vsl->container_count() == 0 && vsl != current_virtual_space()) { 1041 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1026 // Unlink it from the list 1042 // Unlink it from the list
1027 if (prev_vsl == vsl) { 1043 if (prev_vsl == vsl) {
1028 // This is the case of the current note being the first note. 1044 // This is the case of the current node being the first node.
1029 assert(vsl == virtual_space_list(), "Expected to be the first note"); 1045 assert(vsl == virtual_space_list(), "Expected to be the first node");
1030 set_virtual_space_list(vsl->next()); 1046 set_virtual_space_list(vsl->next());
1031 } else { 1047 } else {
1032 prev_vsl->set_next(vsl->next()); 1048 prev_vsl->set_next(vsl->next());
1033 } 1049 }
1034 1050
1052 } 1068 }
1053 } 1069 }
1054 #endif 1070 #endif
1055 } 1071 }
1056 1072
1057 VirtualSpaceList::VirtualSpaceList(size_t word_size ) : 1073 void VirtualSpaceList::retire_current_virtual_space() {
1074 assert_lock_strong(SpaceManager::expand_lock());
1075
1076 VirtualSpaceNode* vsn = current_virtual_space();
1077
1078 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1079 Metaspace::chunk_manager_metadata();
1080
1081 vsn->retire(cm);
1082 }
1083
1084 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1085 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1086 ChunkIndex index = (ChunkIndex)i;
1087 size_t chunk_size = chunk_manager->free_chunks(index)->size();
1088
1089 while (free_words_in_vs() >= chunk_size) {
1090 DEBUG_ONLY(verify_container_count();)
1091 Metachunk* chunk = get_chunk_vs(chunk_size);
1092 assert(chunk != NULL, "allocation should have been successful");
1093
1094 chunk_manager->return_chunks(index, chunk);
1095 chunk_manager->inc_free_chunks_total(chunk_size);
1096 DEBUG_ONLY(verify_container_count();)
1097 }
1098 }
1099 assert(free_words_in_vs() == 0, "should be empty now");
1100 }
1101
1102 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1058 _is_class(false), 1103 _is_class(false),
1059 _virtual_space_list(NULL), 1104 _virtual_space_list(NULL),
1060 _current_virtual_space(NULL), 1105 _current_virtual_space(NULL),
1061 _reserved_words(0), 1106 _reserved_words(0),
1062 _committed_words(0), 1107 _committed_words(0),
1063 _virtual_space_count(0) { 1108 _virtual_space_count(0) {
1064 MutexLockerEx cl(SpaceManager::expand_lock(), 1109 MutexLockerEx cl(SpaceManager::expand_lock(),
1065 Mutex::_no_safepoint_check_flag); 1110 Mutex::_no_safepoint_check_flag);
1066 bool initialization_succeeded = grow_vs(word_size); 1111 create_new_virtual_space(word_size);
1067 assert(initialization_succeeded,
1068 " VirtualSpaceList initialization should not fail");
1069 } 1112 }
1070 1113
1071 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 1114 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1072 _is_class(true), 1115 _is_class(true),
1073 _virtual_space_list(NULL), 1116 _virtual_space_list(NULL),
1077 _virtual_space_count(0) { 1120 _virtual_space_count(0) {
1078 MutexLockerEx cl(SpaceManager::expand_lock(), 1121 MutexLockerEx cl(SpaceManager::expand_lock(),
1079 Mutex::_no_safepoint_check_flag); 1122 Mutex::_no_safepoint_check_flag);
1080 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 1123 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1081 bool succeeded = class_entry->initialize(); 1124 bool succeeded = class_entry->initialize();
1082 assert(succeeded, " VirtualSpaceList initialization should not fail"); 1125 if (succeeded) {
1083 link_vs(class_entry); 1126 link_vs(class_entry);
1127 }
1084 } 1128 }
1085 1129
1086 size_t VirtualSpaceList::free_bytes() { 1130 size_t VirtualSpaceList::free_bytes() {
1087 return virtual_space_list()->free_words_in_vs() * BytesPerWord; 1131 return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1088 } 1132 }
1089 1133
1090 // Allocate another meta virtual space and add it to the list. 1134 // Allocate another meta virtual space and add it to the list.
1091 bool VirtualSpaceList::grow_vs(size_t vs_word_size) { 1135 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1092 assert_lock_strong(SpaceManager::expand_lock()); 1136 assert_lock_strong(SpaceManager::expand_lock());
1137
1138 if (is_class()) {
1139 assert(false, "We currently don't support more than one VirtualSpace for"
1140 " the compressed class space. The initialization of the"
1141 " CCS uses another code path and should not hit this path.");
1142 return false;
1143 }
1144
1093 if (vs_word_size == 0) { 1145 if (vs_word_size == 0) {
1146 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1094 return false; 1147 return false;
1095 } 1148 }
1149
1096 // Reserve the space 1150 // Reserve the space
1097 size_t vs_byte_size = vs_word_size * BytesPerWord; 1151 size_t vs_byte_size = vs_word_size * BytesPerWord;
1098 assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); 1152 assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1099 1153
1100 // Allocate the meta virtual space and initialize it. 1154 // Allocate the meta virtual space and initialize it.
1101 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); 1155 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1102 if (!new_entry->initialize()) { 1156 if (!new_entry->initialize()) {
1103 delete new_entry; 1157 delete new_entry;
1104 return false; 1158 return false;
1105 } else { 1159 } else {
1106 assert(new_entry->reserved_words() == vs_word_size, "Must be"); 1160 assert(new_entry->reserved_words() == vs_word_size,
1161 "Reserved memory size differs from requested memory size");
1107 // ensure lock-free iteration sees fully initialized node 1162 // ensure lock-free iteration sees fully initialized node
1108 OrderAccess::storestore(); 1163 OrderAccess::storestore();
1109 link_vs(new_entry); 1164 link_vs(new_entry);
1110 return true; 1165 return true;
1111 } 1166 }
1128 VirtualSpaceNode* vsl = current_virtual_space(); 1183 VirtualSpaceNode* vsl = current_virtual_space();
1129 vsl->print_on(gclog_or_tty); 1184 vsl->print_on(gclog_or_tty);
1130 } 1185 }
1131 } 1186 }
1132 1187
1133 bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { 1188 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1189 size_t min_words,
1190 size_t preferred_words) {
1134 size_t before = node->committed_words(); 1191 size_t before = node->committed_words();
1135 1192
1136 bool result = node->expand_by(word_size, pre_touch); 1193 bool result = node->expand_by(min_words, preferred_words);
1137 1194
1138 size_t after = node->committed_words(); 1195 size_t after = node->committed_words();
1139 1196
1140 // after and before can be the same if the memory was pre-committed. 1197 // after and before can be the same if the memory was pre-committed.
1141 assert(after >= before, "Must be"); 1198 assert(after >= before, "Inconsistency");
1142 inc_committed_words(after - before); 1199 inc_committed_words(after - before);
1143 1200
1144 return result; 1201 return result;
1202 }
1203
1204 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1205 assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
1206 assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1207 assert(min_words <= preferred_words, "Invalid arguments");
1208
1209 if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1210 return false;
1211 }
1212
1213 size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1214 if (allowed_expansion_words < min_words) {
1215 return false;
1216 }
1217
1218 size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1219
1220 // Commit more memory from the current virtual space.
1221 bool vs_expanded = expand_node_by(current_virtual_space(),
1222 min_words,
1223 max_expansion_words);
1224 if (vs_expanded) {
1225 return true;
1226 }
1227 retire_current_virtual_space();
1228
1229 // Get another virtual space.
1230 size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1231 grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1232
1233 if (create_new_virtual_space(grow_vs_words)) {
1234 if (current_virtual_space()->is_pre_committed()) {
1235 // The memory was pre-committed, so we are done here.
1236 assert(min_words <= current_virtual_space()->committed_words(),
1237 "The new VirtualSpace was pre-committed, so it"
1238 "should be large enough to fit the alloc request.");
1239 return true;
1240 }
1241
1242 return expand_node_by(current_virtual_space(),
1243 min_words,
1244 max_expansion_words);
1245 }
1246
1247 return false;
1145 } 1248 }
1146 1249
1147 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, 1250 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1148 size_t grow_chunks_by_words, 1251 size_t grow_chunks_by_words,
1149 size_t medium_chunk_bunch) { 1252 size_t medium_chunk_bunch) {
1150 1253
1151 // Allocate a chunk out of the current virtual space. 1254 // Allocate a chunk out of the current virtual space.
1152 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 1255 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1153 1256
1154 if (next == NULL) { 1257 if (next != NULL) {
1155 // Not enough room in current virtual space. Try to commit 1258 return next;
1156 // more space. 1259 }
1157 size_t expand_vs_by_words = MAX2(medium_chunk_bunch, 1260
1158 grow_chunks_by_words); 1261 // The expand amount is currently only determined by the requested sizes
1159 size_t page_size_words = os::vm_page_size() / BytesPerWord; 1262 // and not how much committed memory is left in the current virtual space.
1160 size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words, 1263
1161 page_size_words); 1264 size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1162 bool vs_expanded = 1265 size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
1163 expand_by(current_virtual_space(), aligned_expand_vs_by_words); 1266 if (min_word_size >= preferred_word_size) {
1164 if (!vs_expanded) { 1267 // Can happen when humongous chunks are allocated.
1165 // Should the capacity of the metaspaces be expanded for 1268 preferred_word_size = min_word_size;
1166 // this allocation? If it's the virtual space for classes and is 1269 }
1167 // being used for CompressedHeaders, don't allocate a new virtualspace. 1270
1168 if (can_grow() && MetaspaceGC::should_expand(this, word_size)) { 1271 bool expanded = expand_by(min_word_size, preferred_word_size);
1169 // Get another virtual space. 1272 if (expanded) {
1170 size_t allocation_aligned_expand_words = 1273 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1171 align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord); 1274 assert(next != NULL, "The allocation was expected to succeed after the expansion");
1172 size_t grow_vs_words = 1275 }
1173 MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words); 1276
1174 if (grow_vs(grow_vs_words)) { 1277 return next;
1175 // Got it. It's on the list now. Get a chunk from it.
1176 assert(current_virtual_space()->expanded_words() == 0,
1177 "New virtual space nodes should not have expanded");
1178
1179 size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
1180 page_size_words);
1181 // We probably want to expand by aligned_expand_vs_by_words here.
1182 expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
1183 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1184 }
1185 } else {
1186 // Allocation will fail and induce a GC
1187 if (TraceMetadataChunkAllocation && Verbose) {
1188 gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
1189 " Fail instead of expand the metaspace");
1190 }
1191 }
1192 } else {
1193 // The virtual space expanded, get a new chunk
1194 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1195 assert(next != NULL, "Just expanded, should succeed");
1196 }
1197 }
1198
1199 assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1200 "New chunk is still on some list");
1201 return next;
1202 }
1203
1204 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1205 size_t chunk_bunch) {
1206 // Get a chunk from the chunk freelist
1207 Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1208 chunk_word_size,
1209 chunk_bunch);
1210 return new_chunk;
1211 } 1278 }
1212 1279
1213 void VirtualSpaceList::print_on(outputStream* st) const { 1280 void VirtualSpaceList::print_on(outputStream* st) const {
1214 if (TraceMetadataChunkAllocation && Verbose) { 1281 if (TraceMetadataChunkAllocation && Verbose) {
1215 VirtualSpaceListIterator iter(virtual_space_list()); 1282 VirtualSpaceListIterator iter(virtual_space_list());
1254 // the HWM. 1321 // the HWM.
1255 1322
1256 // Calculate the amount to increase the high water mark (HWM). 1323 // Calculate the amount to increase the high water mark (HWM).
1257 // Increase by a minimum amount (MinMetaspaceExpansion) so that 1324 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1258 // another expansion is not requested too soon. If that is not 1325 // another expansion is not requested too soon. If that is not
1259 // enough to satisfy the allocation (i.e. big enough for a word_size 1326 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1260 // allocation), increase by MaxMetaspaceExpansion. If that is still 1327 // If that is still not enough, expand by the size of the allocation
1261 // not enough, expand by the size of the allocation (word_size) plus 1328 // plus some.
1262 // some. 1329 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1263 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { 1330 size_t min_delta = MinMetaspaceExpansion;
1264 size_t before_inc = MetaspaceGC::capacity_until_GC(); 1331 size_t max_delta = MaxMetaspaceExpansion;
1265 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; 1332 size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1266 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord; 1333
1267 size_t page_size_words = os::vm_page_size() / BytesPerWord; 1334 if (delta <= min_delta) {
1268 size_t size_delta_words = align_size_up(word_size, page_size_words); 1335 delta = min_delta;
1269 size_t delta_words = MAX2(size_delta_words, min_delta_words); 1336 } else if (delta <= max_delta) {
1270 if (delta_words > min_delta_words) {
1271 // Don't want to hit the high water mark on the next 1337 // Don't want to hit the high water mark on the next
1272 // allocation so make the delta greater than just enough 1338 // allocation so make the delta greater than just enough
1273 // for this allocation. 1339 // for this allocation.
1274 delta_words = MAX2(delta_words, max_delta_words); 1340 delta = max_delta;
1275 if (delta_words > max_delta_words) { 1341 } else {
1276 // This allocation is large but the next ones are probably not 1342 // This allocation is large but the next ones are probably not
1277 // so increase by the minimum. 1343 // so increase by the minimum.
1278 delta_words = delta_words + min_delta_words; 1344 delta = delta + min_delta;
1279 } 1345 }
1280 } 1346
1281 return delta_words; 1347 assert_is_size_aligned(delta, Metaspace::commit_alignment());
1282 } 1348
1283 1349 return delta;
1284 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { 1350 }
1285 1351
1286 // If the user wants a limit, impose one. 1352 size_t MetaspaceGC::capacity_until_GC() {
1287 // The reason for someone using this flag is to limit reserved space. So 1353 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1288 // for non-class virtual space, compare against virtual spaces that are reserved. 1354 assert(value >= MetaspaceSize, "Not initialized properly?");
1289 // For class virtual space, we only compare against the committed space, not 1355 return value;
1290 // reserved space, because this is a larger space prereserved for compressed 1356 }
1291 // class pointers. 1357
1292 if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { 1358 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1293 size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 1359 assert_is_size_aligned(v, Metaspace::commit_alignment());
1294 size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 1360
1295 size_t real_allocated = nonclass_allocated + class_allocated; 1361 return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1296 if (real_allocated >= MaxMetaspaceSize) { 1362 }
1363
1364 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1365 assert_is_size_aligned(v, Metaspace::commit_alignment());
1366
1367 return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1368 }
1369
1370 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1371 // Check if the compressed class space is full.
1372 if (is_class && Metaspace::using_class_space()) {
1373 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1374 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1297 return false; 1375 return false;
1298 } 1376 }
1299 } 1377 }
1300 1378
1301 // Class virtual space should always be expanded. Call GC for the other 1379 // Check if the user has imposed a limit on the metaspace memory.
1302 // metadata virtual space. 1380 size_t committed_bytes = MetaspaceAux::committed_bytes();
1303 if (Metaspace::using_class_space() && 1381 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1304 (vsl == Metaspace::class_space_list())) return true; 1382 return false;
1305 1383 }
1306 // If this is part of an allocation after a GC, expand 1384
1307 // unconditionally. 1385 return true;
1308 if (MetaspaceGC::expand_after_GC()) { 1386 }
1309 return true; 1387
1310 } 1388 size_t MetaspaceGC::allowed_expansion() {
1311 1389 size_t committed_bytes = MetaspaceAux::committed_bytes();
1312 1390
1313 // If the capacity is below the minimum capacity, allow the 1391 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
1314 // expansion. Also set the high-water-mark (capacity_until_GC) 1392
1315 // to that minimum capacity so that a GC will not be induced 1393 // Always grant expansion if we are initiating the JVM,
1316 // until that minimum capacity is exceeded. 1394 // or if the GC_locker is preventing GCs.
1317 size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 1395 if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1318 size_t metaspace_size_bytes = MetaspaceSize; 1396 return left_until_max / BytesPerWord;
1319 if (committed_capacity_bytes < metaspace_size_bytes || 1397 }
1320 capacity_until_GC() == 0) { 1398
1321 set_capacity_until_GC(metaspace_size_bytes); 1399 size_t capacity_until_gc = capacity_until_GC();
1322 return true; 1400
1323 } else { 1401 if (capacity_until_gc <= committed_bytes) {
1324 if (committed_capacity_bytes < capacity_until_GC()) { 1402 return 0;
1325 return true; 1403 }
1326 } else { 1404
1327 if (TraceMetadataChunkAllocation && Verbose) { 1405 size_t left_until_GC = capacity_until_gc - committed_bytes;
1328 gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT 1406 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1329 " capacity_until_GC " SIZE_FORMAT 1407
1330 " allocated_capacity_bytes " SIZE_FORMAT, 1408 return left_to_commit / BytesPerWord;
1331 word_size, 1409 }
1332 capacity_until_GC(),
1333 MetaspaceAux::allocated_capacity_bytes());
1334 }
1335 return false;
1336 }
1337 }
1338 }
1339
1340
1341 1410
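
With should_expand()/set_capacity_until_GC() removed, the high-water mark _capacity_until_GC is now adjusted lock-free through Atomic::add_ptr in inc/dec_capacity_until_GC() above. A stand-alone sketch of that update pattern, using std::atomic purely as a portable stand-in for HotSpot's Atomic class:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    static std::atomic<std::intptr_t> capacity_until_gc{0};

    // Both helpers return the updated value, matching Atomic::add_ptr's contract.
    static std::size_t inc_capacity_until_gc(std::size_t bytes) {
      return static_cast<std::size_t>(
          capacity_until_gc.fetch_add(static_cast<std::intptr_t>(bytes)) +
          static_cast<std::intptr_t>(bytes));
    }

    static std::size_t dec_capacity_until_gc(std::size_t bytes) {
      return static_cast<std::size_t>(
          capacity_until_gc.fetch_sub(static_cast<std::intptr_t>(bytes)) -
          static_cast<std::intptr_t>(bytes));
    }
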
1342 void MetaspaceGC::compute_new_size() { 1411 void MetaspaceGC::compute_new_size() {
1343 assert(_shrink_factor <= 100, "invalid shrink factor"); 1412 assert(_shrink_factor <= 100, "invalid shrink factor");
1344 uint current_shrink_factor = _shrink_factor; 1413 uint current_shrink_factor = _shrink_factor;
1345 _shrink_factor = 0; 1414 _shrink_factor = 0;
1346 1415
1347 // Until a faster way of calculating the "used" quantity is implemented,
1348 // use "capacity".
1349 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); 1416 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1350 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 1417 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1351 1418
1352 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; 1419 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1353 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1420 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1375 size_t shrink_bytes = 0; 1442 size_t shrink_bytes = 0;
1376 if (capacity_until_GC < minimum_desired_capacity) { 1443 if (capacity_until_GC < minimum_desired_capacity) {
1377 // If we have less capacity below the metaspace HWM, then 1444 // If we have less capacity below the metaspace HWM, then
1378 // increment the HWM. 1445 // increment the HWM.
1379 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1446 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1447 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1380 // Don't expand unless it's significant 1448 // Don't expand unless it's significant
1381 if (expand_bytes >= MinMetaspaceExpansion) { 1449 if (expand_bytes >= MinMetaspaceExpansion) {
1382 MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); 1450 MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1383 } 1451 }
1384 if (PrintGCDetails && Verbose) { 1452 if (PrintGCDetails && Verbose) {
1385 size_t new_capacity_until_GC = capacity_until_GC; 1453 size_t new_capacity_until_GC = capacity_until_GC;
1386 gclog_or_tty->print_cr(" expanding:" 1454 gclog_or_tty->print_cr(" expanding:"
1387 " minimum_desired_capacity: %6.1fKB" 1455 " minimum_desired_capacity: %6.1fKB"
1434 // we'd just have to grow the heap up again for the next phase. So we 1502 // we'd just have to grow the heap up again for the next phase. So we
1435 // damp the shrinking: 0% on the first call, 10% on the second call, 40% 1503 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1436 // on the third call, and 100% by the fourth call. But if we recompute 1504 // on the third call, and 100% by the fourth call. But if we recompute
1437 // size without shrinking, it goes back to 0%. 1505 // size without shrinking, it goes back to 0%.
1438 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1506 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1507
1508 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1509
1439 assert(shrink_bytes <= max_shrink_bytes, 1510 assert(shrink_bytes <= max_shrink_bytes,
1440 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1511 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1441 shrink_bytes, max_shrink_bytes)); 1512 shrink_bytes, max_shrink_bytes));
1442 if (current_shrink_factor == 0) { 1513 if (current_shrink_factor == 0) {
1443 _shrink_factor = 10; 1514 _shrink_factor = 10;
1465 } 1536 }
1466 1537
1467 // Don't shrink unless it's significant 1538 // Don't shrink unless it's significant
1468 if (shrink_bytes >= MinMetaspaceExpansion && 1539 if (shrink_bytes >= MinMetaspaceExpansion &&
1469 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1540 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1470 MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); 1541 MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1471 } 1542 }
1472 } 1543 }
1473 1544
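A compact sketch of the damped-shrink schedule described in the comment above (0%, 10%, 40%, 100% across successive calls), including the new rounding of the shrink amount down to the commit alignment. align_down is written out explicitly because the HotSpot helper is not available in a standalone example, and the sizes are illustrative.

#include <cstddef>
#include <cstdio>

// align_size_down() equivalent for a power-of-two alignment.
static size_t align_down(size_t value, size_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  const size_t excess_bytes = 10 * 1024 * 1024;  // illustrative room above the desired capacity
  const size_t commit_alignment = 64 * 1024;     // illustrative commit granularity
  unsigned shrink_factor = 0;                    // 0% on the first shrink decision

  for (int call = 1; call <= 4; call++) {
    size_t shrink_bytes = excess_bytes / 100 * shrink_factor;
    shrink_bytes = align_down(shrink_bytes, commit_alignment);
    printf("call %d: %3u%% -> shrink %zu bytes\n", call, shrink_factor, shrink_bytes);
    // Damping schedule: 0% -> 10% -> 40% -> 100%.
    shrink_factor = (shrink_factor == 0) ? 10 : (shrink_factor == 10) ? 40 : 100;
  }
  return 0;
}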
1474 // Metadebug methods 1545 // Metadebug methods
1475
1476 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1477 size_t chunk_word_size){
1478 #ifdef ASSERT
1479 VirtualSpaceList* vsl = sm->vs_list();
1480 if (MetaDataDeallocateALot &&
1481 Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1482 Metadebug::reset_deallocate_chunk_a_lot_count();
1483 for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1484 Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1485 if (dummy_chunk == NULL) {
1486 break;
1487 }
1488 sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1489
1490 if (TraceMetadataChunkAllocation && Verbose) {
1491 gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1492 sm->sum_count_in_chunks_in_use());
1493 dummy_chunk->print_on(gclog_or_tty);
1494 gclog_or_tty->print_cr(" Free chunks total %d count %d",
1495 sm->chunk_manager()->free_chunks_total_words(),
1496 sm->chunk_manager()->free_chunks_count());
1497 }
1498 }
1499 } else {
1500 Metadebug::inc_deallocate_chunk_a_lot_count();
1501 }
1502 #endif
1503 }
1504
1505 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1506 size_t raw_word_size){
1507 #ifdef ASSERT
1508 if (MetaDataDeallocateALot &&
1509 Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1510 Metadebug::set_deallocate_block_a_lot_count(0);
1511 for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1512 MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1513 if (dummy_block == 0) {
1514 break;
1515 }
1516 sm->deallocate(dummy_block, raw_word_size);
1517 }
1518 } else {
1519 Metadebug::inc_deallocate_block_a_lot_count();
1520 }
1521 #endif
1522 }
1523 1546
1524 void Metadebug::init_allocation_fail_alot_count() { 1547 void Metadebug::init_allocation_fail_alot_count() {
1525 if (MetadataAllocationFailALot) { 1548 if (MetadataAllocationFailALot) {
1526 _allocation_fail_alot_count = 1549 _allocation_fail_alot_count =
1527 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1550 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1662 ChunkIndex index = list_index(word_size); 1685 ChunkIndex index = list_index(word_size);
1663 assert(index < HumongousIndex, "No humongous list"); 1686 assert(index < HumongousIndex, "No humongous list");
1664 return free_chunks(index); 1687 return free_chunks(index);
1665 } 1688 }
1666 1689
1667 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1668 assert_lock_strong(SpaceManager::expand_lock());
1669 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1670 chunk->set_next(free_list->head());
1671 free_list->set_head(chunk);
1672 // chunk is being returned to the chunk free list
1673 inc_free_chunks_total(chunk->capacity_word_size());
1674 slow_locked_verify();
1675 }
1676
1677 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1678 // The deallocation of a chunk originates in the freelist
1679 // management code for a Metaspace and does not hold the
1680 // lock.
1681 assert(chunk != NULL, "Deallocating NULL");
1682 assert_lock_strong(SpaceManager::expand_lock());
1683 slow_locked_verify();
1684 if (TraceMetadataChunkAllocation) {
1685 gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1686 PTR_FORMAT " size " SIZE_FORMAT,
1687 chunk, chunk->word_size());
1688 }
1689 free_chunks_put(chunk);
1690 }
1691
1692 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1690 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1693 assert_lock_strong(SpaceManager::expand_lock()); 1691 assert_lock_strong(SpaceManager::expand_lock());
1694 1692
1695 slow_locked_verify(); 1693 slow_locked_verify();
1696 1694
1698 if (list_index(word_size) != HumongousIndex) { 1696 if (list_index(word_size) != HumongousIndex) {
1699 ChunkList* free_list = find_free_chunks_list(word_size); 1697 ChunkList* free_list = find_free_chunks_list(word_size);
1700 assert(free_list != NULL, "Sanity check"); 1698 assert(free_list != NULL, "Sanity check");
1701 1699
1702 chunk = free_list->head(); 1700 chunk = free_list->head();
1703 debug_only(Metachunk* debug_head = chunk;)
1704 1701
1705 if (chunk == NULL) { 1702 if (chunk == NULL) {
1706 return NULL; 1703 return NULL;
1707 } 1704 }
1708 1705
1709 // Remove the chunk as the head of the list. 1706 // Remove the chunk as the head of the list.
1710 free_list->remove_chunk(chunk); 1707 free_list->remove_chunk(chunk);
1711
1712 // Chunk is being removed from the chunks free list.
1713 dec_free_chunks_total(chunk->capacity_word_size());
1714 1708
1715 if (TraceMetadataChunkAllocation && Verbose) { 1709 if (TraceMetadataChunkAllocation && Verbose) {
1716 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " 1710 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1717 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 1711 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1718 free_list, chunk, chunk->word_size()); 1712 free_list, chunk, chunk->word_size());
1720 } else { 1714 } else {
1721 chunk = humongous_dictionary()->get_chunk( 1715 chunk = humongous_dictionary()->get_chunk(
1722 word_size, 1716 word_size,
1723 FreeBlockDictionary<Metachunk>::atLeast); 1717 FreeBlockDictionary<Metachunk>::atLeast);
1724 1718
1725 if (chunk != NULL) { 1719 if (chunk == NULL) {
1726 if (TraceMetadataHumongousAllocation) {
1727 size_t waste = chunk->word_size() - word_size;
1728 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1729 SIZE_FORMAT " for requested size " SIZE_FORMAT
1730 " waste " SIZE_FORMAT,
1731 chunk->word_size(), word_size, waste);
1732 }
1733 // Chunk is being removed from the chunks free list.
1734 dec_free_chunks_total(chunk->capacity_word_size());
1735 } else {
1736 return NULL; 1720 return NULL;
1737 } 1721 }
1738 } 1722
1723 if (TraceMetadataHumongousAllocation) {
1724 size_t waste = chunk->word_size() - word_size;
1725 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1726 SIZE_FORMAT " for requested size " SIZE_FORMAT
1727 " waste " SIZE_FORMAT,
1728 chunk->word_size(), word_size, waste);
1729 }
1730 }
1731
1732 // Chunk is being removed from the chunks free list.
1733 dec_free_chunks_total(chunk->word_size());
1739 1734
1740 // Remove it from the links to this freelist 1735 // Remove it from the links to this freelist
1741 chunk->set_next(NULL); 1736 chunk->set_next(NULL);
1742 chunk->set_prev(NULL); 1737 chunk->set_prev(NULL);
1743 #ifdef ASSERT 1738 #ifdef ASSERT
1744 // Chunk is no longer on any freelist. Setting to false make container_count_slow() 1739 // Chunk is no longer on any freelist. Setting to false make container_count_slow()
1745 // work. 1740 // work.
1746 chunk->set_is_free(false); 1741 chunk->set_is_tagged_free(false);
1747 #endif 1742 #endif
1748 chunk->container()->inc_container_count(); 1743 chunk->container()->inc_container_count();
1749 1744
1750 slow_locked_verify(); 1745 slow_locked_verify();
1751 return chunk; 1746 return chunk;
1873 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1868 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1874 size_t sum = 0; 1869 size_t sum = 0;
1875 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1870 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1876 Metachunk* chunk = chunks_in_use(i); 1871 Metachunk* chunk = chunks_in_use(i);
1877 while (chunk != NULL) { 1872 while (chunk != NULL) {
1878 sum += chunk->capacity_word_size(); 1873 sum += chunk->word_size();
1879 chunk = chunk->next(); 1874 chunk = chunk->next();
1880 } 1875 }
1881 } 1876 }
1882 return sum; 1877 return sum;
1883 } 1878 }
1949 } 1944 }
1950 } else { 1945 } else {
1951 chunk_word_size = medium_chunk_size(); 1946 chunk_word_size = medium_chunk_size();
1952 } 1947 }
1953 1948
1954 // Might still need a humongous chunk. Enforce an 1949 // Might still need a humongous chunk. Enforce
1955 // eight word granularity to facilitate reuse (some 1950 // humongous allocation sizes to be aligned up to
1956 // wastage but better chance of reuse). 1951 // the smallest chunk size.
1957 size_t if_humongous_sized_chunk = 1952 size_t if_humongous_sized_chunk =
1958 align_size_up(word_size + Metachunk::overhead(), 1953 align_size_up(word_size + Metachunk::overhead(),
1959 HumongousChunkGranularity); 1954 smallest_chunk_size());
1960 chunk_word_size = 1955 chunk_word_size =
1961 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 1956 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1962 1957
1963 assert(!SpaceManager::is_humongous(word_size) || 1958 assert(!SpaceManager::is_humongous(word_size) ||
1964 chunk_word_size == if_humongous_sized_chunk, 1959 chunk_word_size == if_humongous_sized_chunk,
1975 Metachunk::overhead()); 1970 Metachunk::overhead());
1976 } 1971 }
1977 return chunk_word_size; 1972 return chunk_word_size;
1978 } 1973 }
1979 1974
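To make the new rounding rule concrete, here is a stand-alone sketch of the humongous sizing computation above: the request plus per-chunk overhead is rounded up to the smallest chunk size instead of the old fixed eight-word granularity, so a freed humongous chunk can later be carved into ordinary chunks. The overhead and chunk sizes below are example values, not the real Metachunk layout.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// align_size_up() equivalent for a power-of-two alignment.
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

static size_t humongous_chunk_words(size_t requested_words,
                                    size_t overhead_words,
                                    size_t smallest_chunk_words,
                                    size_t default_chunk_words) {
  size_t rounded = align_up(requested_words + overhead_words, smallest_chunk_words);
  return std::max(default_chunk_words, rounded);
}

int main() {
  // 10000-word request, 8 words of overhead, 128-word smallest chunks,
  // 1024-word default chunk -> 10112 words.
  printf("%zu words\n", humongous_chunk_words(10000, 8, 128, 1024));
  return 0;
}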
1975 void SpaceManager::track_metaspace_memory_usage() {
1976 if (is_init_completed()) {
1977 if (is_class()) {
1978 MemoryService::track_compressed_class_memory_usage();
1979 }
1980 MemoryService::track_metaspace_memory_usage();
1981 }
1982 }
1983
1980 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 1984 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1981 assert(vs_list()->current_virtual_space() != NULL, 1985 assert(vs_list()->current_virtual_space() != NULL,
1982 "Should have been set"); 1986 "Should have been set");
1983 assert(current_chunk() == NULL || 1987 assert(current_chunk() == NULL ||
1984 current_chunk()->allocate(word_size) == NULL, 1988 current_chunk()->allocate(word_size) == NULL,
2000 2004
2001 // Get another chunk out of the virtual space 2005 // Get another chunk out of the virtual space
2002 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2006 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2003 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2007 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2004 2008
2009 MetaWord* mem = NULL;
2010
2005 // If a chunk was available, add it to the in-use chunk list 2011 // If a chunk was available, add it to the in-use chunk list
2006 // and do an allocation from it. 2012 // and do an allocation from it.
2007 if (next != NULL) { 2013 if (next != NULL) {
2008 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2009 // Add to this manager's list of chunks in use. 2014 // Add to this manager's list of chunks in use.
2010 add_chunk(next, false); 2015 add_chunk(next, false);
2011 return next->allocate(word_size); 2016 mem = next->allocate(word_size);
2012 } 2017 }
2013 return NULL; 2018
2019 // Track metaspace memory usage statistic.
2020 track_metaspace_memory_usage();
2021
2022 return mem;
2014 } 2023 }
2015 2024
2016 void SpaceManager::print_on(outputStream* st) const { 2025 void SpaceManager::print_on(outputStream* st) const {
2017 2026
2018 for (ChunkIndex i = ZeroIndex; 2027 for (ChunkIndex i = ZeroIndex;
2103 assert(cur->container() != NULL, "Container should have been set"); 2112 assert(cur->container() != NULL, "Container should have been set");
2104 cur->container()->dec_container_count(); 2113 cur->container()->dec_container_count();
2105 // Capture the next link before it is changed 2114 // Capture the next link before it is changed
2106 // by the call to return_chunk_at_head(); 2115 // by the call to return_chunk_at_head();
2107 Metachunk* next = cur->next(); 2116 Metachunk* next = cur->next();
2108 cur->set_is_free(true); 2117 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2109 list->return_chunk_at_head(cur); 2118 list->return_chunk_at_head(cur);
2110 cur = next; 2119 cur = next;
2111 } 2120 }
2112 } 2121 }
2113 2122
2175 // Humongous chunks are never the current chunk. 2184 // Humongous chunks are never the current chunk.
2176 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2185 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2177 2186
2178 while (humongous_chunks != NULL) { 2187 while (humongous_chunks != NULL) {
2179 #ifdef ASSERT 2188 #ifdef ASSERT
2180 humongous_chunks->set_is_free(true); 2189 humongous_chunks->set_is_tagged_free(true);
2181 #endif 2190 #endif
2182 if (TraceMetadataChunkAllocation && Verbose) { 2191 if (TraceMetadataChunkAllocation && Verbose) {
2183 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", 2192 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2184 humongous_chunks, 2193 humongous_chunks,
2185 humongous_chunks->word_size()); 2194 humongous_chunks->word_size());
2186 } 2195 }
2187 assert(humongous_chunks->word_size() == (size_t) 2196 assert(humongous_chunks->word_size() == (size_t)
2188 align_size_up(humongous_chunks->word_size(), 2197 align_size_up(humongous_chunks->word_size(),
2189 HumongousChunkGranularity), 2198 smallest_chunk_size()),
2190 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT 2199 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2191 " granularity %d", 2200 " granularity %d",
2192 humongous_chunks->word_size(), HumongousChunkGranularity)); 2201 humongous_chunks->word_size(), smallest_chunk_size()));
2193 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2202 Metachunk* next_humongous_chunks = humongous_chunks->next();
2194 humongous_chunks->container()->dec_container_count(); 2203 humongous_chunks->container()->dec_container_count();
2195 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2204 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2196 humongous_chunks = next_humongous_chunks; 2205 humongous_chunks = next_humongous_chunks;
2197 } 2206 }
2339 p = fl->get_block(raw_word_size); 2348 p = fl->get_block(raw_word_size);
2340 } 2349 }
2341 if (p == NULL) { 2350 if (p == NULL) {
2342 p = allocate_work(raw_word_size); 2351 p = allocate_work(raw_word_size);
2343 } 2352 }
2344 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2345 2353
2346 return p; 2354 return p;
2347 } 2355 }
2348 2356
2349 // Returns the address of spaced allocated for "word_size". 2357 // Returns the address of spaced allocated for "word_size".
2364 if (DumpSharedSpaces) { 2372 if (DumpSharedSpaces) {
2365 assert(current_chunk() != NULL, "should never happen"); 2373 assert(current_chunk() != NULL, "should never happen");
2366 inc_used_metrics(word_size); 2374 inc_used_metrics(word_size);
2367 return current_chunk()->allocate(word_size); // caller handles null result 2375 return current_chunk()->allocate(word_size); // caller handles null result
2368 } 2376 }
2377
2369 if (current_chunk() != NULL) { 2378 if (current_chunk() != NULL) {
2370 result = current_chunk()->allocate(word_size); 2379 result = current_chunk()->allocate(word_size);
2371 } 2380 }
2372 2381
2373 if (result == NULL) { 2382 if (result == NULL) {
2374 result = grow_and_allocate(word_size); 2383 result = grow_and_allocate(word_size);
2375 } 2384 }
2376 if (result != 0) { 2385
2386 if (result != NULL) {
2377 inc_used_metrics(word_size); 2387 inc_used_metrics(word_size);
2378 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2388 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2379 "Head of the list is being allocated"); 2389 "Head of the list is being allocated");
2380 } 2390 }
2381 2391
2436 curr = curr->next()) { 2446 curr = curr->next()) {
2437 out->print("%d) ", i++); 2447 out->print("%d) ", i++);
2438 curr->print_on(out); 2448 curr->print_on(out);
2439 curr_total += curr->word_size(); 2449 curr_total += curr->word_size();
2440 used += curr->used_word_size(); 2450 used += curr->used_word_size();
2441 capacity += curr->capacity_word_size(); 2451 capacity += curr->word_size();
2442 waste += curr->free_word_size() + curr->overhead(); 2452 waste += curr->free_word_size() + curr->overhead();
2443 } 2453 }
2444 } 2454 }
2445 2455
2446 if (TraceMetadataChunkAllocation && Verbose) { 2456 if (TraceMetadataChunkAllocation && Verbose) {
2637 2647
2638 // This is printed when PrintGCDetails 2648 // This is printed when PrintGCDetails
2639 void MetaspaceAux::print_on(outputStream* out) { 2649 void MetaspaceAux::print_on(outputStream* out) {
2640 Metaspace::MetadataType nct = Metaspace::NonClassType; 2650 Metaspace::MetadataType nct = Metaspace::NonClassType;
2641 2651
2642 out->print_cr(" Metaspace total " 2652 out->print_cr(" Metaspace "
2643 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2653 "used " SIZE_FORMAT "K, "
2644 " reserved " SIZE_FORMAT "K", 2654 "capacity " SIZE_FORMAT "K, "
2645 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K); 2655 "committed " SIZE_FORMAT "K, "
2646 2656 "reserved " SIZE_FORMAT "K",
2647 out->print_cr(" data space " 2657 allocated_used_bytes()/K,
2648 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2658 allocated_capacity_bytes()/K,
2649 " reserved " SIZE_FORMAT "K", 2659 committed_bytes()/K,
2650 allocated_capacity_bytes(nct)/K, 2660 reserved_bytes()/K);
2651 allocated_used_bytes(nct)/K, 2661
2652 reserved_bytes(nct)/K);
2653 if (Metaspace::using_class_space()) { 2662 if (Metaspace::using_class_space()) {
2654 Metaspace::MetadataType ct = Metaspace::ClassType; 2663 Metaspace::MetadataType ct = Metaspace::ClassType;
2655 out->print_cr(" class space " 2664 out->print_cr(" class space "
2656 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2665 "used " SIZE_FORMAT "K, "
2657 " reserved " SIZE_FORMAT "K", 2666 "capacity " SIZE_FORMAT "K, "
2667 "committed " SIZE_FORMAT "K, "
2668 "reserved " SIZE_FORMAT "K",
2669 allocated_used_bytes(ct)/K,
2658 allocated_capacity_bytes(ct)/K, 2670 allocated_capacity_bytes(ct)/K,
2659 allocated_used_bytes(ct)/K, 2671 committed_bytes(ct)/K,
2660 reserved_bytes(ct)/K); 2672 reserved_bytes(ct)/K);
2661 } 2673 }
2662 } 2674 }
2663 2675
2664 // Print information for class space and data space separately. 2676 // Print information for class space and data space separately.
2806 // Metaspace methods 2818 // Metaspace methods
2807 2819
2808 size_t Metaspace::_first_chunk_word_size = 0; 2820 size_t Metaspace::_first_chunk_word_size = 0;
2809 size_t Metaspace::_first_class_chunk_word_size = 0; 2821 size_t Metaspace::_first_class_chunk_word_size = 0;
2810 2822
2823 size_t Metaspace::_commit_alignment = 0;
2824 size_t Metaspace::_reserve_alignment = 0;
2825
2811 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2826 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2812 initialize(lock, type); 2827 initialize(lock, type);
2813 } 2828 }
2814 2829
2815 Metaspace::~Metaspace() { 2830 Metaspace::~Metaspace() {
2826 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2841 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2827 2842
2828 #define VIRTUALSPACEMULTIPLIER 2 2843 #define VIRTUALSPACEMULTIPLIER 2
2829 2844
2830 #ifdef _LP64 2845 #ifdef _LP64
2846 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2847
2831 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2848 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2832 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2849 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2833 // narrow_klass_base is the lower of the metaspace base and the cds base 2850 // narrow_klass_base is the lower of the metaspace base and the cds base
2834 // (if cds is enabled). The narrow_klass_shift depends on the distance 2851 // (if cds is enabled). The narrow_klass_shift depends on the distance
2835 // between the lower base and higher address. 2852 // between the lower base and higher address.
2836 address lower_base; 2853 address lower_base;
2837 address higher_address; 2854 address higher_address;
2838 if (UseSharedSpaces) { 2855 if (UseSharedSpaces) {
2839 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2856 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2840 (address)(metaspace_base + class_metaspace_size())); 2857 (address)(metaspace_base + compressed_class_space_size()));
2841 lower_base = MIN2(metaspace_base, cds_base); 2858 lower_base = MIN2(metaspace_base, cds_base);
2842 } else { 2859 } else {
2843 higher_address = metaspace_base + class_metaspace_size(); 2860 higher_address = metaspace_base + compressed_class_space_size();
2844 lower_base = metaspace_base; 2861 lower_base = metaspace_base;
2845 } 2862
2863 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2864 // If compressed class space fits in lower 32G, we don't need a base.
2865 if (higher_address <= (address)klass_encoding_max) {
2866 lower_base = 0; // effectively lower base is zero.
2867 }
2868 }
2869
2846 Universe::set_narrow_klass_base(lower_base); 2870 Universe::set_narrow_klass_base(lower_base);
2847 if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) { 2871
2872 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2848 Universe::set_narrow_klass_shift(0); 2873 Universe::set_narrow_klass_shift(0);
2849 } else { 2874 } else {
2850 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2875 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2851 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2876 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2852 } 2877 }
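The new branch above drops the encoding base to zero when the whole compressed class range fits below the scaled encoding limit. A rough standalone illustration of that decision follows; choose_encoding and its constants are assumptions for this example (LogKlassAlignmentInBytes is taken to be 3), not the set_narrow_klass_base_and_shift() API itself.

#include <cstdint>
#include <cstdio>

static const unsigned LogKlassAlignmentAssumed = 3;                           // 8-byte aligned Klass*
static const uint64_t UnscaledMax = uint64_t(UINT32_MAX) + 1;                 // 4 GB, usable with shift 0
static const uint64_t ScaledMax   = UnscaledMax << LogKlassAlignmentAssumed;  // 32 GB, usable with shift 3

struct Encoding { uint64_t base; unsigned shift; };

// Pick a (base, shift) pair for compressing class pointers in [lower, higher).
static Encoding choose_encoding(uint64_t lower, uint64_t higher) {
  Encoding e;
  // If everything sits below the scaled limit, no base is needed at all.
  e.base = (higher <= ScaledMax) ? 0 : lower;
  // A zero shift works only if the addressed range spans at most 4 GB.
  e.shift = ((higher - e.base) <= UnscaledMax) ? 0 : LogKlassAlignmentAssumed;
  return e;
}

int main() {
  // Range 28 GB .. 29 GB: below 32 GB, so base 0, but spans above 4 GB, so shift 3.
  Encoding e = choose_encoding(0x700000000ULL, 0x740000000ULL);
  printf("base=0x%llx shift=%u\n", (unsigned long long)e.base, e.shift);
  return 0;
}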
2857 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2882 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2858 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2883 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2859 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2884 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2860 address lower_base = MIN2((address)metaspace_base, cds_base); 2885 address lower_base = MIN2((address)metaspace_base, cds_base);
2861 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2886 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2862 (address)(metaspace_base + class_metaspace_size())); 2887 (address)(metaspace_base + compressed_class_space_size()));
2863 return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); 2888 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2864 } 2889 }
2865 2890
2866 // Try to allocate the metaspace at the requested addr. 2891 // Try to allocate the metaspace at the requested addr.
2867 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 2892 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2868 assert(using_class_space(), "called improperly"); 2893 assert(using_class_space(), "called improperly");
2869 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2894 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2870 assert(class_metaspace_size() < KlassEncodingMetaspaceMax, 2895 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2871 "Metaspace size is too big"); 2896 "Metaspace size is too big");
2872 2897 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2873 ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), 2898 assert_is_ptr_aligned(cds_base, _reserve_alignment);
2874 os::vm_allocation_granularity(), 2899 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2875 false, requested_addr, 0); 2900
2901 // Don't use large pages for the class space.
2902 bool large_pages = false;
2903
2904 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2905 _reserve_alignment,
2906 large_pages,
2907 requested_addr, 0);
2876 if (!metaspace_rs.is_reserved()) { 2908 if (!metaspace_rs.is_reserved()) {
2877 if (UseSharedSpaces) { 2909 if (UseSharedSpaces) {
2910 size_t increment = align_size_up(1*G, _reserve_alignment);
2911
2878 // Keep trying to allocate the metaspace, increasing the requested_addr 2912 // Keep trying to allocate the metaspace, increasing the requested_addr
2879 // by 1GB each time, until we reach an address that will no longer allow 2913 // by 1GB each time, until we reach an address that will no longer allow
2880 // use of CDS with compressed klass pointers. 2914 // use of CDS with compressed klass pointers.
2881 char *addr = requested_addr; 2915 char *addr = requested_addr;
2882 while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && 2916 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2883 can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { 2917 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2884 addr = addr + 1*G; 2918 addr = addr + increment;
2885 metaspace_rs = ReservedSpace(class_metaspace_size(), 2919 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2886 os::vm_allocation_granularity(), false, addr, 0); 2920 _reserve_alignment, large_pages, addr, 0);
2887 } 2921 }
2888 } 2922 }
2889 2923
2890 // If no successful allocation then try to allocate the space anywhere. If 2924 // If no successful allocation then try to allocate the space anywhere. If
2891 // that fails then OOM doom. At this point we cannot try allocating the 2925 // that fails then OOM doom. At this point we cannot try allocating the
2892 // metaspace as if UseCompressedClassPointers is off because too much 2926 // metaspace as if UseCompressedClassPointers is off because too much
2893 // initialization has happened that depends on UseCompressedClassPointers. 2927 // initialization has happened that depends on UseCompressedClassPointers.
2894 // So, UseCompressedClassPointers cannot be turned off at this point. 2928 // So, UseCompressedClassPointers cannot be turned off at this point.
2895 if (!metaspace_rs.is_reserved()) { 2929 if (!metaspace_rs.is_reserved()) {
2896 metaspace_rs = ReservedSpace(class_metaspace_size(), 2930 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2897 os::vm_allocation_granularity(), false); 2931 _reserve_alignment, large_pages);
2898 if (!metaspace_rs.is_reserved()) { 2932 if (!metaspace_rs.is_reserved()) {
2899 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 2933 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
2900 class_metaspace_size())); 2934 compressed_class_space_size()));
2901 } 2935 }
2902 } 2936 }
2903 } 2937 }
2904 2938
2905 // If we got here then the metaspace got allocated. 2939 // If we got here then the metaspace got allocated.
2917 initialize_class_space(metaspace_rs); 2951 initialize_class_space(metaspace_rs);
2918 2952
2919 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 2953 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2920 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 2954 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
2921 Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 2955 Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2922 gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 2956 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2923 class_metaspace_size(), metaspace_rs.base(), requested_addr); 2957 compressed_class_space_size(), metaspace_rs.base(), requested_addr);
2924 } 2958 }
2925 } 2959 }
2926 2960
2927 // For UseCompressedClassPointers the class space is reserved above the top of 2961 // For UseCompressedClassPointers the class space is reserved above the top of
2928 // the Java heap. The argument passed in is at the base of the compressed space. 2962 // the Java heap. The argument passed in is at the base of the compressed space.
2931 assert(rs.size() >= CompressedClassSpaceSize, 2965 assert(rs.size() >= CompressedClassSpaceSize,
2932 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize)); 2966 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
2933 assert(using_class_space(), "Must be using class space"); 2967 assert(using_class_space(), "Must be using class space");
2934 _class_space_list = new VirtualSpaceList(rs); 2968 _class_space_list = new VirtualSpaceList(rs);
2935 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 2969 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
2970
2971 if (!_class_space_list->initialization_succeeded()) {
2972 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
2973 }
2936 } 2974 }
2937 2975
2938 #endif 2976 #endif
2977
2978 void Metaspace::ergo_initialize() {
2979 if (DumpSharedSpaces) {
2980 // Using large pages when dumping the shared archive is currently not implemented.
2981 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
2982 }
2983
2984 size_t page_size = os::vm_page_size();
2985 if (UseLargePages && UseLargePagesInMetaspace) {
2986 page_size = os::large_page_size();
2987 }
2988
2989 _commit_alignment = page_size;
2990 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
2991
2992 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
2993 // override if MaxMetaspaceSize was set on the command line or not.
2994 // This information is needed later to conform to the specification of the
2995 // java.lang.management.MemoryUsage API.
2996 //
2997 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
2998 // globals.hpp to the aligned value, but this is not possible, since the
2999 // alignment depends on other flags being parsed.
3000 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3001
3002 if (MetaspaceSize > MaxMetaspaceSize) {
3003 MetaspaceSize = MaxMetaspaceSize;
3004 }
3005
3006 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3007
3008 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3009
3010 if (MetaspaceSize < 256*K) {
3011 vm_exit_during_initialization("Too small initial Metaspace size");
3012 }
3013
3014 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3015 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3016
3017 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3018 set_compressed_class_space_size(CompressedClassSpaceSize);
3019 }
2939 3020
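The essence of the new ergo_initialize() above is rounding the user-visible size flags to the page-based commit and reserve granularities before anything else consumes them. Below is a minimal sketch with illustrative page sizes; align_down_bounded is a local approximation of the _bounded helpers used above (round down, but never below one alignment unit).

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_down_bounded(size_t value, size_t alignment) {
  size_t down = value & ~(alignment - 1);
  return std::max(down, alignment);
}

int main() {
  const size_t page_size   = 4 * 1024;    // illustrative small-page size
  const size_t granularity = 64 * 1024;   // illustrative allocation granularity

  size_t commit_alignment  = page_size;
  size_t reserve_alignment = std::max(page_size, granularity);

  size_t max_metaspace_size = 100 * 1024 * 1024 + 12345;  // as if set on the command line
  size_t metaspace_size     = 21 * 1024 * 1024 + 7;

  max_metaspace_size = align_down_bounded(max_metaspace_size, reserve_alignment);
  metaspace_size     = std::min(metaspace_size, max_metaspace_size);
  metaspace_size     = align_down_bounded(metaspace_size, commit_alignment);

  printf("MaxMetaspaceSize=%zu MetaspaceSize=%zu\n", max_metaspace_size, metaspace_size);
  return 0;
}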
2940 void Metaspace::global_initialize() { 3021 void Metaspace::global_initialize() {
2941 // Initialize the alignment for shared spaces. 3022 // Initialize the alignment for shared spaces.
2942 int max_alignment = os::vm_page_size(); 3023 int max_alignment = os::vm_page_size();
2943 size_t cds_total = 0; 3024 size_t cds_total = 0;
2944 3025
2945 set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
2946 os::vm_allocation_granularity()));
2947
2948 MetaspaceShared::set_max_alignment(max_alignment); 3026 MetaspaceShared::set_max_alignment(max_alignment);
2949 3027
2950 if (DumpSharedSpaces) { 3028 if (DumpSharedSpaces) {
2951 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3029 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2952 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3030 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2953 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3031 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2954 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3032 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2955 3033
2956 // Initialize with the sum of the shared space sizes. The read-only 3034 // Initialize with the sum of the shared space sizes. The read-only
2957 // and read write metaspace chunks will be allocated out of this and the 3035 // and read write metaspace chunks will be allocated out of this and the
2958 // remainder is the misc code and data chunks. 3036 // remainder is the misc code and data chunks.
2959 cds_total = FileMapInfo::shared_spaces_size(); 3037 cds_total = FileMapInfo::shared_spaces_size();
3038 cds_total = align_size_up(cds_total, _reserve_alignment);
2960 _space_list = new VirtualSpaceList(cds_total/wordSize); 3039 _space_list = new VirtualSpaceList(cds_total/wordSize);
2961 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3040 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
2962 3041
3042 if (!_space_list->initialization_succeeded()) {
3043 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3044 }
3045
2963 #ifdef _LP64 3046 #ifdef _LP64
3047 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3048 vm_exit_during_initialization("Unable to dump shared archive.",
3049 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3050 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3051 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3052 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3053 }
3054
2964 // Set the compressed klass pointer base so that decoding of these pointers works 3055 // Set the compressed klass pointer base so that decoding of these pointers works
2965 // properly when creating the shared archive. 3056 // properly when creating the shared archive.
2966 assert(UseCompressedOops && UseCompressedClassPointers, 3057 assert(UseCompressedOops && UseCompressedClassPointers,
2967 "UseCompressedOops and UseCompressedClassPointers must be set"); 3058 "UseCompressedOops and UseCompressedClassPointers must be set");
2968 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3059 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
2969 if (TraceMetavirtualspaceAllocation && Verbose) { 3060 if (TraceMetavirtualspaceAllocation && Verbose) {
2970 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3061 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
2971 _space_list->current_virtual_space()->bottom()); 3062 _space_list->current_virtual_space()->bottom());
2972 } 3063 }
2973 3064
2974 // Set the shift to zero.
2975 assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
2976 "CDS region is too large");
2977 Universe::set_narrow_klass_shift(0); 3065 Universe::set_narrow_klass_shift(0);
2978 #endif 3066 #endif
2979 3067
2980 } else { 3068 } else {
2981 // If using shared space, open the file that contains the shared space 3069 // If using shared space, open the file that contains the shared space
2990 // initialization fails, shared spaces [UseSharedSpaces] are 3078 // initialization fails, shared spaces [UseSharedSpaces] are
2991 // disabled and the file is closed. 3079 // disabled and the file is closed.
2992 // Map in spaces now also 3080 // Map in spaces now also
2993 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3081 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2994 FileMapInfo::set_current_info(mapinfo); 3082 FileMapInfo::set_current_info(mapinfo);
3083 cds_total = FileMapInfo::shared_spaces_size();
3084 cds_address = (address)mapinfo->region_base(0);
2995 } else { 3085 } else {
2996 assert(!mapinfo->is_open() && !UseSharedSpaces, 3086 assert(!mapinfo->is_open() && !UseSharedSpaces,
2997 "archive file not closed or shared spaces not disabled."); 3087 "archive file not closed or shared spaces not disabled.");
2998 } 3088 }
2999 cds_total = FileMapInfo::shared_spaces_size();
3000 cds_address = (address)mapinfo->region_base(0);
3001 } 3089 }
3002 3090
3003 #ifdef _LP64 3091 #ifdef _LP64
3004 // If UseCompressedClassPointers is set then allocate the metaspace area 3092 // If UseCompressedClassPointers is set then allocate the metaspace area
3005 // above the heap and above the CDS area (if it exists). 3093 // above the heap and above the CDS area (if it exists).
3006 if (using_class_space()) { 3094 if (using_class_space()) {
3007 if (UseSharedSpaces) { 3095 if (UseSharedSpaces) {
3008 allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); 3096 char* cds_end = (char*)(cds_address + cds_total);
3097 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3098 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3009 } else { 3099 } else {
3010 allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); 3100 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3101 allocate_metaspace_compressed_klass_ptrs(base, 0);
3011 } 3102 }
3012 } 3103 }
3013 #endif 3104 #endif
3014 3105
3015 // Initialize these before initializing the VirtualSpaceList 3106 // Initialize these before initializing the VirtualSpaceList
3021 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, 3112 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3022 (CompressedClassSpaceSize/BytesPerWord)*2); 3113 (CompressedClassSpaceSize/BytesPerWord)*2);
3023 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3114 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3024 // Arbitrarily set the initial virtual space to a multiple 3115 // Arbitrarily set the initial virtual space to a multiple
3025 // of the boot class loader size. 3116 // of the boot class loader size.
3026 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); 3117 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3118 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3119
3027 // Initialize the list of virtual spaces. 3120 // Initialize the list of virtual spaces.
3028 _space_list = new VirtualSpaceList(word_size); 3121 _space_list = new VirtualSpaceList(word_size);
3029 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3122 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3030 } 3123
3124 if (!_space_list->initialization_succeeded()) {
3125 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3126 }
3127 }
3128
3129 MetaspaceGC::initialize();
3031 } 3130 }
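In the non-dump path above, the first metadata virtual space is sized as a multiple of the boot chunk size and, with this change, rounded up to the reserve alignment expressed in words. A small sketch of that sizing with illustrative numbers (the multiplier mirrors VIRTUALSPACEMULTIPLIER; the chunk and alignment values are assumptions):

#include <cstddef>
#include <cstdio>

static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t bytes_per_word           = 8;            // LP64 assumption
  const size_t virtual_space_multiplier = 2;            // mirrors VIRTUALSPACEMULTIPLIER
  const size_t first_chunk_words        = 48 * 1024;    // illustrative boot chunk size
  const size_t reserve_alignment_bytes  = 1024 * 1024;  // illustrative reserve granularity

  size_t word_size = virtual_space_multiplier * first_chunk_words;
  word_size = align_up(word_size, reserve_alignment_bytes / bytes_per_word);

  printf("initial virtual space: %zu words (%zu bytes)\n",
         word_size, word_size * bytes_per_word);
  return 0;
}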
3032 3131
3033 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3132 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3034 size_t chunk_word_size, 3133 size_t chunk_word_size,
3035 size_t chunk_bunch) { 3134 size_t chunk_bunch) {
3037 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3136 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3038 if (chunk != NULL) { 3137 if (chunk != NULL) {
3039 return chunk; 3138 return chunk;
3040 } 3139 }
3041 3140
3042 return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch); 3141 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3043 } 3142 }
3044 3143
3045 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3144 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3046 3145
3047 assert(space_list() != NULL, 3146 assert(space_list() != NULL,
3110 return vsm()->allocate(word_size); 3209 return vsm()->allocate(word_size);
3111 } 3210 }
3112 } 3211 }
3113 3212
3114 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3213 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3115 MetaWord* result; 3214 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3116 MetaspaceGC::set_expand_after_GC(true); 3215 assert(delta_bytes > 0, "Must be");
3117 size_t before_inc = MetaspaceGC::capacity_until_GC(); 3216
3118 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; 3217 size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3119 MetaspaceGC::inc_capacity_until_GC(delta_bytes); 3218 size_t before_inc = after_inc - delta_bytes;
3219
3120 if (PrintGCDetails && Verbose) { 3220 if (PrintGCDetails && Verbose) {
3121 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3221 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3122 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); 3222 " to " SIZE_FORMAT, before_inc, after_inc);
3123 } 3223 }
3124 3224
3125 result = allocate(word_size, mdtype); 3225 return allocate(word_size, mdtype);
3126
3127 return result;
3128 } 3226 }
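The reworked expand_and_allocate() above derives the old high-water mark from the value returned by inc_capacity_until_GC() instead of reading the counter separately, which stays consistent even if several threads bump the mark concurrently. A small sketch of that pattern, using a std::atomic stand-in for the capacity counter rather than the HotSpot implementation:

#include <atomic>
#include <cstddef>
#include <cstdio>

// Stand-in for the capacity-until-GC high-water mark.
static std::atomic<size_t> capacity_until_gc{size_t(32) * 1024 * 1024};

// Atomically bump the high-water mark and return the new value.
static size_t inc_capacity_until_gc(size_t delta_bytes) {
  return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
}

int main() {
  size_t word_size   = 4096;           // illustrative allocation request, in words
  size_t delta_bytes = word_size * 8;  // BytesPerWord assumed to be 8

  size_t after_inc  = inc_capacity_until_gc(delta_bytes);
  size_t before_inc = after_inc - delta_bytes;  // derived from the return value, not re-read

  printf("capacity_until_GC: %zu -> %zu\n", before_inc, after_inc);
  return 0;
}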
3129 3227
3130 // Space allocated in the Metaspace. This may 3228 // Space allocated in the Metaspace. This may
3131 // be across several metadata virtual spaces. 3229 // be across several metadata virtual spaces.
3132 char* Metaspace::bottom() const { 3230 char* Metaspace::bottom() const {
3204 vsm()->deallocate(ptr, word_size); 3302 vsm()->deallocate(ptr, word_size);
3205 } 3303 }
3206 } 3304 }
3207 } 3305 }
3208 3306
3209 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3307
3308 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3210 bool read_only, MetaspaceObj::Type type, TRAPS) { 3309 bool read_only, MetaspaceObj::Type type, TRAPS) {
3211 if (HAS_PENDING_EXCEPTION) { 3310 if (HAS_PENDING_EXCEPTION) {
3212 assert(false, "Should not allocate with exception pending"); 3311 assert(false, "Should not allocate with exception pending");
3213 return NULL; // caller does a CHECK_NULL too 3312 return NULL; // caller does a CHECK_NULL too
3214 } 3313 }
3215 3314
3216 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3217
3218 // SSS: Should we align the allocations and make sure the sizes are aligned.
3219 MetaWord* result = NULL;
3220
3221 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3315 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3222 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3316 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3317
3223 // Allocate in metaspaces without taking out a lock, because it deadlocks 3318 // Allocate in metaspaces without taking out a lock, because it deadlocks
3224 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3319 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3225 // to revisit this for application class data sharing. 3320 // to revisit this for application class data sharing.
3226 if (DumpSharedSpaces) { 3321 if (DumpSharedSpaces) {
3227 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3322 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3228 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3323 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3229 result = space->allocate(word_size, NonClassType); 3324 MetaWord* result = space->allocate(word_size, NonClassType);
3230 if (result == NULL) { 3325 if (result == NULL) {
3231 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3326 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3232 } else { 3327 }
3233 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3328
3234 } 3329 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3235 return Metablock::initialize(result, word_size); 3330
3236 } 3331 // Zero initialize.
3237 3332 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3238 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3333
3334 return result;
3335 }
3336
3337 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3338
3339 // Try to allocate metadata.
3340 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3239 3341
3240 if (result == NULL) { 3342 if (result == NULL) {
3241 // Try to clean out some memory and retry. 3343 // Allocation failed.
3242 result = 3344 if (is_init_completed()) {
3243 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3345 // Only start a GC if the bootstrapping has completed.
3244 loader_data, word_size, mdtype); 3346
3245 3347 // Try to clean out some memory and retry.
3246 // If result is still null, we are out of memory. 3348 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3247 if (result == NULL) { 3349 loader_data, word_size, mdtype);
3248 if (Verbose && TraceMetadataChunkAllocation) { 3350 }
3249 gclog_or_tty->print_cr("Metaspace allocation failed for size " 3351 }
3250 SIZE_FORMAT, word_size); 3352
3251 if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); 3353 if (result == NULL) {
3252 MetaspaceAux::dump(gclog_or_tty); 3354 report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
3253 } 3355 }
3254 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3356
3255 const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : 3357 // Zero initialize.
3256 "Metadata space"; 3358 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3257 report_java_out_of_memory(space_string); 3359
3258 3360 return result;
3259 if (JvmtiExport::should_post_resource_exhausted()) { 3361 }
3260 JvmtiExport::post_resource_exhausted( 3362
3261 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3363 size_t Metaspace::class_chunk_size(size_t word_size) {
3262 space_string); 3364 assert(using_class_space(), "Has to use class space");
3263 } 3365 return class_vsm()->calc_chunk_size(word_size);
3264 if (is_class_space_allocation(mdtype)) { 3366 }
3265 THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); 3367
3266 } else { 3368 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3267 THROW_OOP_0(Universe::out_of_memory_error_metaspace()); 3369 // If result is still null, we are out of memory.
3268 } 3370 if (Verbose && TraceMetadataChunkAllocation) {
3269 } 3371 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3270 } 3372 SIZE_FORMAT, word_size);
3271 return Metablock::initialize(result, word_size); 3373 if (loader_data->metaspace_or_null() != NULL) {
3374 loader_data->dump(gclog_or_tty);
3375 }
3376 MetaspaceAux::dump(gclog_or_tty);
3377 }
3378
3379 bool out_of_compressed_class_space = false;
3380 if (is_class_space_allocation(mdtype)) {
3381 Metaspace* metaspace = loader_data->metaspace_non_null();
3382 out_of_compressed_class_space =
3383 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3384 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3385 CompressedClassSpaceSize;
3386 }
3387
3388 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3389 const char* space_string = out_of_compressed_class_space ?
3390 "Compressed class space" : "Metaspace";
3391
3392 report_java_out_of_memory(space_string);
3393
3394 if (JvmtiExport::should_post_resource_exhausted()) {
3395 JvmtiExport::post_resource_exhausted(
3396 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3397 space_string);
3398 }
3399
3400 if (!is_init_completed()) {
3401 vm_exit_during_initialization("OutOfMemoryError", space_string);
3402 }
3403
3404 if (out_of_compressed_class_space) {
3405 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3406 } else {
3407 THROW_OOP(Universe::out_of_memory_error_metaspace());
3408 }
3272 } 3409 }
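report_metadata_oome() above decides whether the failure should surface as a "Metaspace" or a "Compressed class space" OutOfMemoryError by asking whether the failed class-space request would push the committed class space past CompressedClassSpaceSize. A standalone sketch of just that predicate, with illustrative values and locally invented parameter names:

#include <cstddef>
#include <cstdio>

// True if a failed class-space allocation should be reported as
// "Compressed class space" exhaustion rather than generic "Metaspace".
static bool out_of_compressed_class_space(bool class_space_allocation,
                                          size_t committed_class_bytes,
                                          size_t chunk_bytes_needed,
                                          size_t compressed_class_space_size) {
  if (!class_space_allocation) {
    return false;
  }
  return committed_class_bytes + chunk_bytes_needed > compressed_class_space_size;
}

int main() {
  // 1 GB class space, 1023 MB already committed, 2 MB chunk needed.
  bool full = out_of_compressed_class_space(true,
                                            size_t(1023) * 1024 * 1024,
                                            size_t(2) * 1024 * 1024,
                                            size_t(1024) * 1024 * 1024);
  printf("%s\n", full ? "Compressed class space" : "Metaspace");
  return 0;
}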
3273 3410
3274 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3411 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3275 assert(DumpSharedSpaces, "sanity"); 3412 assert(DumpSharedSpaces, "sanity");
3276 3413
3418 3555
3419 void TestMetaspaceAux_test() { 3556 void TestMetaspaceAux_test() {
3420 TestMetaspaceAuxTest::test(); 3557 TestMetaspaceAuxTest::test();
3421 } 3558 }
3422 3559
3560 class TestVirtualSpaceNodeTest {
3561 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3562 size_t& num_small_chunks,
3563 size_t& num_specialized_chunks) {
3564 num_medium_chunks = words_left / MediumChunk;
3565 words_left = words_left % MediumChunk;
3566
3567 num_small_chunks = words_left / SmallChunk;
3568 words_left = words_left % SmallChunk;
3569 // how many specialized chunks can we get?
3570 num_specialized_chunks = words_left / SpecializedChunk;
3571 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3572 }
3573
3574 public:
3575 static void test() {
3576 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3577 const size_t vsn_test_size_words = MediumChunk * 4;
3578 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3579
3580 // The chunk sizes must be multiples of each other, or this will fail
3581 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3582 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3583
3584 { // No committed memory in VSN
3585 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3586 VirtualSpaceNode vsn(vsn_test_size_bytes);
3587 vsn.initialize();
3588 vsn.retire(&cm);
3589 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3590 }
3591
3592 { // All of VSN is committed, half is used by chunks
3593 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3594 VirtualSpaceNode vsn(vsn_test_size_bytes);
3595 vsn.initialize();
3596 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3597 vsn.get_chunk_vs(MediumChunk);
3598 vsn.get_chunk_vs(MediumChunk);
3599 vsn.retire(&cm);
3600 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3601 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3602 }
3603
3604 { // 4 pages of VSN is committed, some is used by chunks
3605 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3606 VirtualSpaceNode vsn(vsn_test_size_bytes);
3607 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3608 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3609 vsn.initialize();
3610 vsn.expand_by(page_chunks, page_chunks);
3611 vsn.get_chunk_vs(SmallChunk);
3612 vsn.get_chunk_vs(SpecializedChunk);
3613 vsn.retire(&cm);
3614
3615 // committed - used = words left to retire
3616 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3617
3618 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3619 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3620
3621 assert(num_medium_chunks == 0, "should not get any medium chunks");
3622 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3623 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3624 }
3625
3626 { // Half of VSN is committed, a humongous chunk is used
3627 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3628 VirtualSpaceNode vsn(vsn_test_size_bytes);
3629 vsn.initialize();
3630 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3631 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3632 vsn.retire(&cm);
3633
3634 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3635 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3636 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3637
3638 assert(num_medium_chunks == 0, "should not get any medium chunks");
3639 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3640 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3641 }
3642
3643 }
3644 };
3645
3646 void TestVirtualSpaceNode_test() {
3647 TestVirtualSpaceNodeTest::test();
3648 }
3649
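The chunk_up() helper in the new test splits the leftover committed words of a virtual space node greedily into medium, small and specialized chunks. Below is a standalone sketch of that split, using the non-class chunk sizes declared earlier in this file (128, 512 and 8K words) as example inputs:

#include <cstddef>
#include <cstdio>

// Greedy split of leftover words into medium, small and specialized
// chunks, mirroring TestVirtualSpaceNodeTest::chunk_up() above.
static void chunk_up(size_t words_left,
                     size_t& num_medium, size_t& num_small, size_t& num_specialized,
                     size_t medium_words, size_t small_words, size_t spec_words) {
  num_medium = words_left / medium_words;
  words_left %= medium_words;
  num_small = words_left / small_words;
  words_left %= small_words;
  num_specialized = words_left / spec_words;
}

int main() {
  const size_t spec_words = 128, small_words = 512, medium_words = 8 * 1024;
  // Leftover after a humongous chunk of (medium + specialized) words was
  // taken out of two committed medium-chunk-sized regions.
  size_t words_left = 2 * medium_words - (medium_words + spec_words);
  size_t m, s, sp;
  chunk_up(words_left, m, s, sp, medium_words, small_words, spec_words);
  printf("medium=%zu small=%zu specialized=%zu (from %zu words)\n", m, s, sp, words_left);
  return 0;
}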
3423 #endif 3650 #endif