comparison src/share/vm/memory/metaspace.cpp @ 14422:2b8e28fdf503

Merge
author kvn
date Tue, 05 Nov 2013 17:38:04 -0800
parents b4aa8fc5d0d5
children c8fc12209830 610be0309a79
14421:3068270ba476 14422:2b8e28fdf503
21 * questions. 21 * questions.
22 * 22 *
23 */ 23 */
24 #include "precompiled.hpp" 24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp" 25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/allocation.hpp"
26 #include "memory/binaryTreeDictionary.hpp" 27 #include "memory/binaryTreeDictionary.hpp"
27 #include "memory/freeList.hpp" 28 #include "memory/freeList.hpp"
28 #include "memory/collectorPolicy.hpp" 29 #include "memory/collectorPolicy.hpp"
29 #include "memory/filemap.hpp" 30 #include "memory/filemap.hpp"
30 #include "memory/freeList.hpp" 31 #include "memory/freeList.hpp"
31 #include "memory/metablock.hpp" 32 #include "memory/gcLocker.hpp"
32 #include "memory/metachunk.hpp" 33 #include "memory/metachunk.hpp"
33 #include "memory/metaspace.hpp" 34 #include "memory/metaspace.hpp"
34 #include "memory/metaspaceShared.hpp" 35 #include "memory/metaspaceShared.hpp"
35 #include "memory/resourceArea.hpp" 36 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp" 37 #include "memory/universe.hpp"
38 #include "runtime/atomic.inline.hpp"
37 #include "runtime/globals.hpp" 39 #include "runtime/globals.hpp"
40 #include "runtime/init.hpp"
38 #include "runtime/java.hpp" 41 #include "runtime/java.hpp"
39 #include "runtime/mutex.hpp" 42 #include "runtime/mutex.hpp"
40 #include "runtime/orderAccess.hpp" 43 #include "runtime/orderAccess.hpp"
41 #include "services/memTracker.hpp" 44 #include "services/memTracker.hpp"
45 #include "services/memoryService.hpp"
42 #include "utilities/copy.hpp" 46 #include "utilities/copy.hpp"
43 #include "utilities/debug.hpp" 47 #include "utilities/debug.hpp"
44 48
45 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary; 49 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
46 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary; 50 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
47 // Define this macro to enable slow integrity checking of 51
48 // the free chunk lists 52 // Set this constant to enable slow integrity checking of the free chunk lists
49 const bool metaspace_slow_verify = false; 53 const bool metaspace_slow_verify = false;
50 54
51 // Parameters for stress mode testing 55 size_t const allocation_from_dictionary_limit = 4 * K;
52 const uint metadata_deallocate_a_lot_block = 10;
53 const uint metadata_deallocate_a_lock_chunk = 3;
54 size_t const allocation_from_dictionary_limit = 64 * K;
55 56
56 MetaWord* last_allocated = 0; 57 MetaWord* last_allocated = 0;
57 58
58 size_t Metaspace::_class_metaspace_size; 59 size_t Metaspace::_compressed_class_space_size;
59 60
60 // Used in declarations in SpaceManager and ChunkManager 61 // Used in declarations in SpaceManager and ChunkManager
61 enum ChunkIndex { 62 enum ChunkIndex {
62 ZeroIndex = 0, 63 ZeroIndex = 0,
63 SpecializedIndex = ZeroIndex, 64 SpecializedIndex = ZeroIndex,
72 ClassSpecializedChunk = 128, 73 ClassSpecializedChunk = 128,
73 SpecializedChunk = 128, 74 SpecializedChunk = 128,
74 ClassSmallChunk = 256, 75 ClassSmallChunk = 256,
75 SmallChunk = 512, 76 SmallChunk = 512,
76 ClassMediumChunk = 4 * K, 77 ClassMediumChunk = 4 * K,
77 MediumChunk = 8 * K, 78 MediumChunk = 8 * K
78 HumongousChunkGranularity = 8
79 }; 79 };
80 80
81 static ChunkIndex next_chunk_index(ChunkIndex i) { 81 static ChunkIndex next_chunk_index(ChunkIndex i) {
82 assert(i < NumberOfInUseLists, "Out of bound"); 82 assert(i < NumberOfInUseLists, "Out of bound");
83 return (ChunkIndex) (i+1); 83 return (ChunkIndex) (i+1);
84 } 84 }
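
The chunk sizes above come in two parallel sets, one for the data space and one for the compressed class space. As a standalone illustration (not HotSpot code; every name below with a _sketch suffix is hypothetical), mapping a non-class chunk word size to its free-list index works out to roughly:

#include <cassert>
#include <cstddef>

enum IndexSketch { SpecializedIdx, SmallIdx, MediumIdx, HumongousIdx };

// Mirrors the non-class sizes above: 128, 512, and 8K words.
IndexSketch list_index_sketch(size_t word_size) {
  const size_t K = 1024;
  if (word_size == 128)   return SpecializedIdx;   // SpecializedChunk
  if (word_size == 512)   return SmallIdx;         // SmallChunk
  if (word_size == 8 * K) return MediumIdx;        // MediumChunk
  assert(word_size > 8 * K && "unexpected chunk size");
  return HumongousIdx;                             // anything larger
}
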
85 85
86 // Originally _capacity_until_GC was set to MetaspaceSize here but 86 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
87 // the default MetaspaceSize before argument processing was being
88 // used which was not the desired value. See the code
89 // in should_expand() to see how the initialization is handled
90 // now.
91 size_t MetaspaceGC::_capacity_until_GC = 0;
92 bool MetaspaceGC::_expand_after_GC = false;
93 uint MetaspaceGC::_shrink_factor = 0; 87 uint MetaspaceGC::_shrink_factor = 0;
94 bool MetaspaceGC::_should_concurrent_collect = false; 88 bool MetaspaceGC::_should_concurrent_collect = false;
95 89
96 // Blocks of space for metadata are allocated out of Metachunks.
97 //
98 // Metachunk are allocated out of MetadataVirtualspaces and once
99 // allocated there is no explicit link between a Metachunk and
100 // the MetadataVirtualspaces from which it was allocated.
101 //
102 // Each SpaceManager maintains a
103 // list of the chunks it is using and the current chunk. The current
104 // chunk is the chunk from which allocations are done. Space freed in
105 // a chunk is placed on the free list of blocks (BlockFreelist) and
106 // reused from there.
107
108 typedef class FreeList<Metachunk> ChunkList; 90 typedef class FreeList<Metachunk> ChunkList;
109 91
110 // Manages the global free lists of chunks. 92 // Manages the global free lists of chunks.
111 // Has three lists of free chunks, and a total size and 93 class ChunkManager : public CHeapObj<mtInternal> {
112 // count that includes all three 94 friend class TestVirtualSpaceNodeTest;
113
114 class ChunkManager VALUE_OBJ_CLASS_SPEC {
115 95
116 // Free list of chunks of different sizes. 96 // Free list of chunks of different sizes.
117 // SpecializedChunk 97 // SpecializedChunk
118 // SmallChunk 98 // SmallChunk
119 // MediumChunk 99 // MediumChunk
120 // HumongousChunk 100 // HumongousChunk
121 ChunkList _free_chunks[NumberOfFreeLists]; 101 ChunkList _free_chunks[NumberOfFreeLists];
122
123 102
124 // HumongousChunk 103 // HumongousChunk
125 ChunkTreeDictionary _humongous_dictionary; 104 ChunkTreeDictionary _humongous_dictionary;
126 105
127 // ChunkManager in all lists of this type 106 // ChunkManager in all lists of this type
156 } 135 }
157 void verify_free_chunks_count(); 136 void verify_free_chunks_count();
158 137
159 public: 138 public:
160 139
161 ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {} 140 ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
141 : _free_chunks_total(0), _free_chunks_count(0) {
142 _free_chunks[SpecializedIndex].set_size(specialized_size);
143 _free_chunks[SmallIndex].set_size(small_size);
144 _free_chunks[MediumIndex].set_size(medium_size);
145 }
162 146
163 // add or delete (return) a chunk to the global freelist. 147 // add or delete (return) a chunk to the global freelist.
164 Metachunk* chunk_freelist_allocate(size_t word_size); 148 Metachunk* chunk_freelist_allocate(size_t word_size);
165 void chunk_freelist_deallocate(Metachunk* chunk);
166 149
167 // Map a size to a list index assuming that there are lists 150 // Map a size to a list index assuming that there are lists
168 // for special, small, medium, and humongous chunks. 151 // for special, small, medium, and humongous chunks.
169 static ChunkIndex list_index(size_t size); 152 static ChunkIndex list_index(size_t size);
170 153
175 // Add the simple linked list of chunks to the freelist of chunks 158 // Add the simple linked list of chunks to the freelist of chunks
176 // of type index. 159 // of type index.
177 void return_chunks(ChunkIndex index, Metachunk* chunks); 160 void return_chunks(ChunkIndex index, Metachunk* chunks);
178 161
179 // Total of the space in the free chunks list 162 // Total of the space in the free chunks list
180 size_t free_chunks_total(); 163 size_t free_chunks_total_words();
181 size_t free_chunks_total_in_bytes(); 164 size_t free_chunks_total_bytes();
182 165
183 // Number of chunks in the free chunks list 166 // Number of chunks in the free chunks list
184 size_t free_chunks_count(); 167 size_t free_chunks_count();
185 168
186 void inc_free_chunks_total(size_t v, size_t count = 1) { 169 void inc_free_chunks_total(size_t v, size_t count = 1) {
194 ChunkList* free_chunks(ChunkIndex index); 177 ChunkList* free_chunks(ChunkIndex index);
195 178
196 // Returns the list for the given chunk word size. 179 // Returns the list for the given chunk word size.
197 ChunkList* find_free_chunks_list(size_t word_size); 180 ChunkList* find_free_chunks_list(size_t word_size);
198 181
199 // Add and remove from a list by size. Selects 182 // Remove from a list by size. Selects list based on size of chunk.
200 // list based on size of chunk.
201 void free_chunks_put(Metachunk* chuck);
202 Metachunk* free_chunks_get(size_t chunk_word_size); 183 Metachunk* free_chunks_get(size_t chunk_word_size);
203 184
204 // Debug support 185 // Debug support
205 void verify(); 186 void verify();
206 void slow_verify() { 187 void slow_verify() {
217 void verify_free_chunks_total(); 198 void verify_free_chunks_total();
218 199
219 void locked_print_free_chunks(outputStream* st); 200 void locked_print_free_chunks(outputStream* st);
220 void locked_print_sum_free_chunks(outputStream* st); 201 void locked_print_sum_free_chunks(outputStream* st);
221 202
222 void print_on(outputStream* st); 203 void print_on(outputStream* st) const;
223 }; 204 };
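
The new constructor takes the three list sizes as parameters instead of hard-wiring them, so one ChunkManager can serve the data space and another the class space. A rough sketch of how the two would be parameterized with the constants above (illustrative type and literal sizes, not the HotSpot API):

#include <cstddef>

struct ChunkManagerSketch {
  size_t sizes[3];  // specialized, small, medium list sizes in words
  ChunkManagerSketch(size_t specialized, size_t small, size_t medium)
    : sizes{specialized, small, medium} {}
};

// Data space uses the plain sizes, class space the Class* ones.
ChunkManagerSketch data_mgr_sketch (128, 512, 8 * 1024);  // Specialized, Small, Medium
ChunkManagerSketch class_mgr_sketch(128, 256, 4 * 1024);  // ClassSpecialized, ClassSmall, ClassMedium
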
224 205
225 // Used to manage the free list of Metablocks (a block corresponds 206 // Used to manage the free list of Metablocks (a block corresponds
226 // to the allocation of a quantum of metadata). 207 // to the allocation of a quantum of metadata).
227 class BlockFreelist VALUE_OBJ_CLASS_SPEC { 208 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
228 BlockTreeDictionary* _dictionary; 209 BlockTreeDictionary* _dictionary;
229 static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size); 210
211 // Only allocate and split from freelist if the size of the allocation
212 // is at least 1/4th the size of the available block.
213 const static int WasteMultiplier = 4;
230 214
231 // Accessors 215 // Accessors
232 BlockTreeDictionary* dictionary() const { return _dictionary; } 216 BlockTreeDictionary* dictionary() const { return _dictionary; }
233 217
234 public: 218 public:
248 } 232 }
249 233
250 void print_on(outputStream* st) const; 234 void print_on(outputStream* st) const;
251 }; 235 };
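
The WasteMultiplier constant above encodes a simple anti-fragmentation policy: a block taken from the freelist is only used when the request is at least a quarter of the block's size, otherwise splitting would waste too much of it. A minimal numeric sketch of that test (hypothetical helper, not the HotSpot implementation; see get_block() further down for the real use):

#include <cstddef>

const int WasteMultiplierSketch = 4;  // mirrors WasteMultiplier above

bool block_acceptable_sketch(size_t block_words, size_t request_words) {
  // A 100-word block serves a 25-word request (100 <= 4 * 25) but stays
  // on the freelist for a 20-word request (100 > 4 * 20).
  return block_words <= WasteMultiplierSketch * request_words;
}
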
252 236
237 // A VirtualSpaceList node.
253 class VirtualSpaceNode : public CHeapObj<mtClass> { 238 class VirtualSpaceNode : public CHeapObj<mtClass> {
254 friend class VirtualSpaceList; 239 friend class VirtualSpaceList;
255 240
256 // Link to next VirtualSpaceNode 241 // Link to next VirtualSpaceNode
257 VirtualSpaceNode* _next; 242 VirtualSpaceNode* _next;
270 255
271 // The first Metachunk will be allocated at the bottom of the 256 // The first Metachunk will be allocated at the bottom of the
272 // VirtualSpace 257 // VirtualSpace
273 Metachunk* first_chunk() { return (Metachunk*) bottom(); } 258 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
274 259
275 void inc_container_count(); 260 // Committed but unused space in the virtual space
276 #ifdef ASSERT 261 size_t free_words_in_vs() const;
277 uint container_count_slow();
278 #endif
279
280 public: 262 public:
281 263
282 VirtualSpaceNode(size_t byte_size); 264 VirtualSpaceNode(size_t byte_size);
283 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} 265 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
284 ~VirtualSpaceNode(); 266 ~VirtualSpaceNode();
285 267
286 // Convenience functions for logical bottom and end 268 // Convenience functions for logical bottom and end
287 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } 269 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
288 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 270 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
271
272 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
273 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
274
275 bool is_pre_committed() const { return _virtual_space.special(); }
289 276
290 // address of next available space in _virtual_space; 277 // address of next available space in _virtual_space;
291 // Accessors 278 // Accessors
292 VirtualSpaceNode* next() { return _next; } 279 VirtualSpaceNode* next() { return _next; }
293 void set_next(VirtualSpaceNode* v) { _next = v; } 280 void set_next(VirtualSpaceNode* v) { _next = v; }
304 291
305 MetaWord* top() const { return _top; } 292 MetaWord* top() const { return _top; }
306 void inc_top(size_t word_size) { _top += word_size; } 293 void inc_top(size_t word_size) { _top += word_size; }
307 294
308 uintx container_count() { return _container_count; } 295 uintx container_count() { return _container_count; }
296 void inc_container_count();
309 void dec_container_count(); 297 void dec_container_count();
310 #ifdef ASSERT 298 #ifdef ASSERT
299 uint container_count_slow();
311 void verify_container_count(); 300 void verify_container_count();
312 #endif 301 #endif
313 302
314 // used and capacity in this single entry in the list 303 // used and capacity in this single entry in the list
315 size_t used_words_in_vs() const; 304 size_t used_words_in_vs() const;
316 size_t capacity_words_in_vs() const; 305 size_t capacity_words_in_vs() const;
317 size_t free_words_in_vs() const;
318 306
319 bool initialize(); 307 bool initialize();
320 308
321 // get space from the virtual space 309 // get space from the virtual space
322 Metachunk* take_from_committed(size_t chunk_word_size); 310 Metachunk* take_from_committed(size_t chunk_word_size);
323 311
324 // Allocate a chunk from the virtual space and return it. 312 // Allocate a chunk from the virtual space and return it.
325 Metachunk* get_chunk_vs(size_t chunk_word_size); 313 Metachunk* get_chunk_vs(size_t chunk_word_size);
326 Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
327 314
328 // Expands/shrinks the committed space in a virtual space. Delegates 315 // Expands/shrinks the committed space in a virtual space. Delegates
329 // to Virtualspace 316 // to Virtualspace
330 bool expand_by(size_t words, bool pre_touch = false); 317 bool expand_by(size_t min_words, size_t preferred_words);
331 bool shrink_by(size_t words);
332 318
333 // In preparation for deleting this node, remove all the chunks 319 // In preparation for deleting this node, remove all the chunks
334 // in the node from any freelist. 320 // in the node from any freelist.
335 void purge(ChunkManager* chunk_manager); 321 void purge(ChunkManager* chunk_manager);
336 322
323 // If an allocation doesn't fit in the current node a new node is created.
324 // Allocate chunks out of the remaining committed space in this node
325 // to avoid wasting that memory.
326 // This always adds up because all the chunk sizes are multiples of
327 // the smallest chunk size.
328 void retire(ChunkManager* chunk_manager);
329
337 #ifdef ASSERT 330 #ifdef ASSERT
338 // Debug support 331 // Debug support
339 static void verify_virtual_space_total();
340 static void verify_virtual_space_count();
341 void mangle(); 332 void mangle();
342 #endif 333 #endif
343 334
344 void print_on(outputStream* st) const; 335 void print_on(outputStream* st) const;
345 }; 336 };
346 337
338 #define assert_is_ptr_aligned(ptr, alignment) \
339 assert(is_ptr_aligned(ptr, alignment), \
340 err_msg(PTR_FORMAT " is not aligned to " \
341 SIZE_FORMAT, ptr, alignment))
342
343 #define assert_is_size_aligned(size, alignment) \
344 assert(is_size_aligned(size, alignment), \
345 err_msg(SIZE_FORMAT " is not aligned to " \
346 SIZE_FORMAT, size, alignment))
347
348
349 // Decide if large pages should be committed when the memory is reserved.
350 static bool should_commit_large_pages_when_reserving(size_t bytes) {
351 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
352 size_t words = bytes / BytesPerWord;
353 bool is_class = false; // We never reserve large pages for the class space.
354 if (MetaspaceGC::can_expand(words, is_class) &&
355 MetaspaceGC::allowed_expansion() >= words) {
356 return true;
357 }
358 }
359
360 return false;
361 }
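
Both assert macros above reduce to power-of-two alignment checks, the same checks the reserve and commit granularities rely on. A self-contained sketch of what is_ptr_aligned and is_size_aligned compute (the _sketch names are assumptions, not the VM's utility functions):

#include <cstddef>
#include <cstdint>

bool is_size_aligned_sketch(size_t size, size_t alignment) {
  // alignment is assumed to be a power of two, e.g. the commit granularity
  return (size & (alignment - 1)) == 0;
}

bool is_ptr_aligned_sketch(const void* p, size_t alignment) {
  return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
}
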
362
347 // byte_size is the size of the associated virtualspace. 363 // byte_size is the size of the associated virtualspace.
348 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 364 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
349 // align up to vm allocation granularity 365 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
350 byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
351 366
352 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve 367 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
353 // configurable address, generally at the top of the Java heap so other 368 // configurable address, generally at the top of the Java heap so other
354 // memory addresses don't conflict. 369 // memory addresses don't conflict.
355 if (DumpSharedSpaces) { 370 if (DumpSharedSpaces) {
356 char* shared_base = (char*)SharedBaseAddress; 371 bool large_pages = false; // No large pages when dumping the CDS archive.
357 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); 372 char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
373
374 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
358 if (_rs.is_reserved()) { 375 if (_rs.is_reserved()) {
359 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); 376 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
360 } else { 377 } else {
361 // Get a mmap region anywhere if the SharedBaseAddress fails. 378 // Get a mmap region anywhere if the SharedBaseAddress fails.
362 _rs = ReservedSpace(byte_size); 379 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
363 } 380 }
364 MetaspaceShared::set_shared_rs(&_rs); 381 MetaspaceShared::set_shared_rs(&_rs);
365 } else { 382 } else {
366 _rs = ReservedSpace(byte_size); 383 bool large_pages = should_commit_large_pages_when_reserving(bytes);
367 } 384
368 385 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
369 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 386 }
387
388 if (_rs.is_reserved()) {
389 assert(_rs.base() != NULL, "Catch if we get a NULL address");
390 assert(_rs.size() != 0, "Catch if we get a 0 size");
391 assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
392 assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
393
394 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
395 }
370 } 396 }
371 397
372 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { 398 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
373 Metachunk* chunk = first_chunk(); 399 Metachunk* chunk = first_chunk();
374 Metachunk* invalid_chunk = (Metachunk*) top(); 400 Metachunk* invalid_chunk = (Metachunk*) top();
375 while (chunk < invalid_chunk ) { 401 while (chunk < invalid_chunk ) {
376 assert(chunk->is_free(), "Should be marked free"); 402 assert(chunk->is_tagged_free(), "Should be tagged free");
377 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 403 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
378 chunk_manager->remove_chunk(chunk); 404 chunk_manager->remove_chunk(chunk);
379 assert(chunk->next() == NULL && 405 assert(chunk->next() == NULL &&
380 chunk->prev() == NULL, 406 chunk->prev() == NULL,
381 "Was not removed from its list"); 407 "Was not removed from its list");
382 chunk = (Metachunk*) next; 408 chunk = (Metachunk*) next;
383 } 409 }
384 } 410 }
385 411
386 #ifdef ASSERT 412 #ifdef ASSERT
387 uint VirtualSpaceNode::container_count_slow() { 413 uint VirtualSpaceNode::container_count_slow() {
391 while (chunk < invalid_chunk ) { 417 while (chunk < invalid_chunk ) {
392 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); 418 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
393 // Don't count the chunks on the free lists. Those are 419 // Don't count the chunks on the free lists. Those are
394 // still part of the VirtualSpaceNode but not currently 420 // still part of the VirtualSpaceNode but not currently
395 // counted. 421 // counted.
396 if (!chunk->is_free()) { 422 if (!chunk->is_tagged_free()) {
397 count++; 423 count++;
398 } 424 }
399 chunk = (Metachunk*) next; 425 chunk = (Metachunk*) next;
400 } 426 }
401 return count; 427 return count;
402 } 428 }
403 #endif 429 #endif
404 430
405 // List of VirtualSpaces for metadata allocation. 431 // List of VirtualSpaces for metadata allocation.
406 // It has a _next link for singly linked list and a MemRegion
407 // for total space in the VirtualSpace.
408 class VirtualSpaceList : public CHeapObj<mtClass> { 432 class VirtualSpaceList : public CHeapObj<mtClass> {
409 friend class VirtualSpaceNode; 433 friend class VirtualSpaceNode;
410 434
411 enum VirtualSpaceSizes { 435 enum VirtualSpaceSizes {
412 VirtualSpaceSize = 256 * K 436 VirtualSpaceSize = 256 * K
413 }; 437 };
414 438
415 // Global list of virtual spaces
416 // Head of the list 439 // Head of the list
417 VirtualSpaceNode* _virtual_space_list; 440 VirtualSpaceNode* _virtual_space_list;
418 // virtual space currently being used for allocations 441 // virtual space currently being used for allocations
419 VirtualSpaceNode* _current_virtual_space; 442 VirtualSpaceNode* _current_virtual_space;
420 // Free chunk list for all other metadata 443
421 ChunkManager _chunk_manager; 444 // Is this VirtualSpaceList used for the compressed class space
422
423 // Can this virtual list allocate >1 spaces? Also, used to determine
424 // whether to allocate unlimited small chunks in this virtual space
425 bool _is_class; 445 bool _is_class;
426 bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; } 446
427 447 // Sum of reserved and committed memory in the virtual spaces
428 // Sum of space in all virtual spaces and number of virtual spaces 448 size_t _reserved_words;
429 size_t _virtual_space_total; 449 size_t _committed_words;
450
451 // Number of virtual spaces
430 size_t _virtual_space_count; 452 size_t _virtual_space_count;
431 453
432 ~VirtualSpaceList(); 454 ~VirtualSpaceList();
433 455
434 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; } 456 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
438 } 460 }
439 void set_current_virtual_space(VirtualSpaceNode* v) { 461 void set_current_virtual_space(VirtualSpaceNode* v) {
440 _current_virtual_space = v; 462 _current_virtual_space = v;
441 } 463 }
442 464
443 void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size); 465 void link_vs(VirtualSpaceNode* new_entry);
444 466
445 // Get another virtual space and add it to the list. This 467 // Get another virtual space and add it to the list. This
446 // is typically prompted by a failed attempt to allocate a chunk 468 // is typically prompted by a failed attempt to allocate a chunk
447 // and is typically followed by the allocation of a chunk. 469 // and is typically followed by the allocation of a chunk.
448 bool grow_vs(size_t vs_word_size); 470 bool create_new_virtual_space(size_t vs_word_size);
471
472 // Chunk up the unused committed space in the current
473 // virtual space and add the chunks to the free list.
474 void retire_current_virtual_space();
449 475
450 public: 476 public:
451 VirtualSpaceList(size_t word_size); 477 VirtualSpaceList(size_t word_size);
452 VirtualSpaceList(ReservedSpace rs); 478 VirtualSpaceList(ReservedSpace rs);
453 479
455 481
456 Metachunk* get_new_chunk(size_t word_size, 482 Metachunk* get_new_chunk(size_t word_size,
457 size_t grow_chunks_by_words, 483 size_t grow_chunks_by_words,
458 size_t medium_chunk_bunch); 484 size_t medium_chunk_bunch);
459 485
460 // Get the first chunk for a Metaspace. Used for 486 bool expand_node_by(VirtualSpaceNode* node,
461 // special cases such as the boot class loader, reflection 487 size_t min_words,
462 // class loader and anonymous class loader. 488 size_t preferred_words);
463 Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch); 489
490 bool expand_by(size_t min_words,
491 size_t preferred_words);
464 492
465 VirtualSpaceNode* current_virtual_space() { 493 VirtualSpaceNode* current_virtual_space() {
466 return _current_virtual_space; 494 return _current_virtual_space;
467 } 495 }
468 496
469 ChunkManager* chunk_manager() { return &_chunk_manager; }
470 bool is_class() const { return _is_class; } 497 bool is_class() const { return _is_class; }
471 498
472 // Allocate the first virtualspace. 499 bool initialization_succeeded() { return _virtual_space_list != NULL; }
473 void initialize(size_t word_size); 500
474 501 size_t reserved_words() { return _reserved_words; }
475 size_t virtual_space_total() { return _virtual_space_total; } 502 size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
476 503 size_t committed_words() { return _committed_words; }
477 void inc_virtual_space_total(size_t v); 504 size_t committed_bytes() { return committed_words() * BytesPerWord; }
478 void dec_virtual_space_total(size_t v); 505
506 void inc_reserved_words(size_t v);
507 void dec_reserved_words(size_t v);
508 void inc_committed_words(size_t v);
509 void dec_committed_words(size_t v);
479 void inc_virtual_space_count(); 510 void inc_virtual_space_count();
480 void dec_virtual_space_count(); 511 void dec_virtual_space_count();
481 512
482 // Unlink empty VirtualSpaceNodes and free them. 513 void purge(ChunkManager* chunk_manager);
483 void purge(); 514 void purge(ChunkManager* chunk_manager);
484
485 // Used and capacity in the entire list of virtual spaces.
486 // These are global values shared by all Metaspaces
487 size_t capacity_words_sum();
488 size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
489 size_t used_words_sum();
490 size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
491 515
492 bool contains(const void *ptr); 516 bool contains(const void *ptr);
493 517
494 void print_on(outputStream* st) const; 518 void print_on(outputStream* st) const;
495 519
513 }; 537 };
514 }; 538 };
515 539
516 class Metadebug : AllStatic { 540 class Metadebug : AllStatic {
517 // Debugging support for Metaspaces 541 // Debugging support for Metaspaces
518 static int _deallocate_block_a_lot_count;
519 static int _deallocate_chunk_a_lot_count;
520 static int _allocation_fail_alot_count; 542 static int _allocation_fail_alot_count;
521 543
522 public: 544 public:
523 static int deallocate_block_a_lot_count() {
524 return _deallocate_block_a_lot_count;
525 }
526 static void set_deallocate_block_a_lot_count(int v) {
527 _deallocate_block_a_lot_count = v;
528 }
529 static void inc_deallocate_block_a_lot_count() {
530 _deallocate_block_a_lot_count++;
531 }
532 static int deallocate_chunk_a_lot_count() {
533 return _deallocate_chunk_a_lot_count;
534 }
535 static void reset_deallocate_chunk_a_lot_count() {
536 _deallocate_chunk_a_lot_count = 1;
537 }
538 static void inc_deallocate_chunk_a_lot_count() {
539 _deallocate_chunk_a_lot_count++;
540 }
541 545
542 static void init_allocation_fail_alot_count(); 546 static void init_allocation_fail_alot_count();
543 #ifdef ASSERT 547 #ifdef ASSERT
544 static bool test_metadata_failure(); 548 static bool test_metadata_failure();
545 #endif 549 #endif
546
547 static void deallocate_chunk_a_lot(SpaceManager* sm,
548 size_t chunk_word_size);
549 static void deallocate_block_a_lot(SpaceManager* sm,
550 size_t chunk_word_size);
551
552 }; 550 };
553 551
554 int Metadebug::_deallocate_block_a_lot_count = 0;
555 int Metadebug::_deallocate_chunk_a_lot_count = 0;
556 int Metadebug::_allocation_fail_alot_count = 0; 552 int Metadebug::_allocation_fail_alot_count = 0;
557 553
558 // SpaceManager - used by Metaspace to handle allocations 554 // SpaceManager - used by Metaspace to handle allocations
559 class SpaceManager : public CHeapObj<mtClass> { 555 class SpaceManager : public CHeapObj<mtClass> {
560 friend class Metaspace; 556 friend class Metaspace;
565 // protects allocations and contains. 561 // protects allocations and contains.
566 Mutex* const _lock; 562 Mutex* const _lock;
567 563
568 // Type of metadata allocated. 564 // Type of metadata allocated.
569 Metaspace::MetadataType _mdtype; 565 Metaspace::MetadataType _mdtype;
570
571 // Chunk related size
572 size_t _medium_chunk_bunch;
573 566
574 // List of chunks in use by this SpaceManager. Allocations 567 // List of chunks in use by this SpaceManager. Allocations
575 // are done from the current chunk. The list is used for deallocating 568 // are done from the current chunk. The list is used for deallocating
576 // chunks when the SpaceManager is freed. 569 // chunks when the SpaceManager is freed.
577 Metachunk* _chunks_in_use[NumberOfInUseLists]; 570 Metachunk* _chunks_in_use[NumberOfInUseLists];
578 Metachunk* _current_chunk; 571 Metachunk* _current_chunk;
579 572
580 // Virtual space where allocation comes from.
581 VirtualSpaceList* _vs_list;
582
583 // Number of small chunks to allocate to a manager 573 // Number of small chunks to allocate to a manager
584 // If class space manager, small chunks are unlimited 574 // If class space manager, small chunks are unlimited
585 static uint const _small_chunk_limit; 575 static uint const _small_chunk_limit;
586 576
587 // Sum of all space in allocated chunks 577 // Sum of all space in allocated chunks
610 BlockFreelist* block_freelists() const { 600 BlockFreelist* block_freelists() const {
611 return (BlockFreelist*) &_block_freelists; 601 return (BlockFreelist*) &_block_freelists;
612 } 602 }
613 603
614 Metaspace::MetadataType mdtype() { return _mdtype; } 604 Metaspace::MetadataType mdtype() { return _mdtype; }
615 VirtualSpaceList* vs_list() const { return _vs_list; } 605
606 VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
607 ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
616 608
617 Metachunk* current_chunk() const { return _current_chunk; } 609 Metachunk* current_chunk() const { return _current_chunk; }
618 void set_current_chunk(Metachunk* v) { 610 void set_current_chunk(Metachunk* v) {
619 _current_chunk = v; 611 _current_chunk = v;
620 } 612 }
621 613
622 Metachunk* find_current_chunk(size_t word_size); 614 Metachunk* find_current_chunk(size_t word_size);
623 615
624 // Add chunk to the list of chunks in use 616 // Add chunk to the list of chunks in use
625 void add_chunk(Metachunk* v, bool make_current); 617 void add_chunk(Metachunk* v, bool make_current);
618 void retire_current_chunk();
626 619
627 Mutex* lock() const { return _lock; } 620 Mutex* lock() const { return _lock; }
628 621
629 const char* chunk_size_name(ChunkIndex index) const; 622 const char* chunk_size_name(ChunkIndex index) const;
630 623
631 protected: 624 protected:
632 void initialize(); 625 void initialize();
633 626
634 public: 627 public:
635 SpaceManager(Metaspace::MetadataType mdtype, 628 SpaceManager(Metaspace::MetadataType mdtype,
636 Mutex* lock, 629 Mutex* lock);
637 VirtualSpaceList* vs_list);
638 ~SpaceManager(); 630 ~SpaceManager();
639 631
640 enum ChunkMultiples { 632 enum ChunkMultiples {
641 MediumChunkMultiple = 4 633 MediumChunkMultiple = 4
642 }; 634 };
643 635
636 bool is_class() { return _mdtype == Metaspace::ClassType; }
637
644 // Accessors 638 // Accessors
645 size_t specialized_chunk_size() { return SpecializedChunk; } 639 size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
646 size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; } 640 size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
647 size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } 641 size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
648 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } 642 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
643
644 size_t smallest_chunk_size() { return specialized_chunk_size(); }
649 645
650 size_t allocated_blocks_words() const { return _allocated_blocks_words; } 646 size_t allocated_blocks_words() const { return _allocated_blocks_words; }
651 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } 647 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
652 size_t allocated_chunks_words() const { return _allocated_chunks_words; } 648 size_t allocated_chunks_words() const { return _allocated_chunks_words; }
653 size_t allocated_chunks_count() const { return _allocated_chunks_count; } 649 size_t allocated_chunks_count() const { return _allocated_chunks_count; }
701 // Called when an allocation from the current chunk fails. 697 // Called when an allocation from the current chunk fails.
702 // Gets a new chunk (may require getting a new virtual space), 698 // Gets a new chunk (may require getting a new virtual space),
703 // and allocates from that chunk. 699 // and allocates from that chunk.
704 MetaWord* grow_and_allocate(size_t word_size); 700 MetaWord* grow_and_allocate(size_t word_size);
705 701
702 // Notify memory usage to MemoryService.
703 void track_metaspace_memory_usage();
704
706 // debugging support. 705 // debugging support.
707 706
708 void dump(outputStream* const out) const; 707 void dump(outputStream* const out) const;
709 void print_on(outputStream* st) const; 708 void print_on(outputStream* st) const;
710 void locked_print_chunks_in_use_on(outputStream* st) const; 709 void locked_print_chunks_in_use_on(outputStream* st) const;
715 #ifdef ASSERT 714 #ifdef ASSERT
716 void verify_allocated_blocks_words(); 715 void verify_allocated_blocks_words();
717 #endif 716 #endif
718 717
719 size_t get_raw_word_size(size_t word_size) { 718 size_t get_raw_word_size(size_t word_size) {
720 // If only the dictionary is going to be used (i.e., no
721 // indexed free list), then there is a minimum size requirement.
722 // MinChunkSize is a placeholder for the real minimum size JJJ
723 size_t byte_size = word_size * BytesPerWord; 719 size_t byte_size = word_size * BytesPerWord;
724 720
725 size_t byte_size_with_overhead = byte_size + Metablock::overhead(); 721 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
726 722 raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
727 size_t raw_bytes_size = MAX2(byte_size_with_overhead, 723
728 Metablock::min_block_byte_size());
729 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
730 size_t raw_word_size = raw_bytes_size / BytesPerWord; 724 size_t raw_word_size = raw_bytes_size / BytesPerWord;
731 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); 725 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
732 726
733 return raw_word_size; 727 return raw_word_size;
734 } 728 }
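
The new get_raw_word_size() rounds a request up to at least one Metablock and to the chunk's object alignment. Tracing it with assumed 64-bit constants (BytesPerWord = 8, sizeof(Metablock) = 16, an object alignment of 8 bytes; the real values come from the VM headers):

#include <cstddef>

size_t align_up_sketch(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

size_t raw_word_size_sketch(size_t word_size) {
  const size_t BytesPerWordSketch = 8;   // assumption: 64-bit VM
  const size_t MetablockBytes     = 16;  // assumption: sizeof(Metablock)
  const size_t ObjAlignment       = 8;   // assumption: Metachunk::object_alignment()

  size_t byte_size = word_size * BytesPerWordSketch;
  size_t raw_bytes = byte_size < MetablockBytes ? MetablockBytes : byte_size;
  raw_bytes = align_up_sketch(raw_bytes, ObjAlignment);
  return raw_bytes / BytesPerWordSketch;  // e.g. word_size 1 -> 2 raw words
}
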
747 void VirtualSpaceNode::inc_container_count() { 741 void VirtualSpaceNode::inc_container_count() {
748 assert_lock_strong(SpaceManager::expand_lock()); 742 assert_lock_strong(SpaceManager::expand_lock());
749 _container_count++; 743 _container_count++;
750 assert(_container_count == container_count_slow(), 744 assert(_container_count == container_count_slow(),
751 err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT 745 err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
752 "container_count_slow() " SIZE_FORMAT, 746 " container_count_slow() " SIZE_FORMAT,
753 _container_count, container_count_slow())); 747 _container_count, container_count_slow()));
754 } 748 }
755 749
756 void VirtualSpaceNode::dec_container_count() { 750 void VirtualSpaceNode::dec_container_count() {
757 assert_lock_strong(SpaceManager::expand_lock()); 751 assert_lock_strong(SpaceManager::expand_lock());
760 754
761 #ifdef ASSERT 755 #ifdef ASSERT
762 void VirtualSpaceNode::verify_container_count() { 756 void VirtualSpaceNode::verify_container_count() {
763 assert(_container_count == container_count_slow(), 757 assert(_container_count == container_count_slow(),
764 err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT 758 err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
765 "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); 759 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
766 } 760 }
767 #endif 761 #endif
768 762
769 // BlockFreelist methods 763 // BlockFreelist methods
770 764
777 } 771 }
778 delete _dictionary; 772 delete _dictionary;
779 } 773 }
780 } 774 }
781 775
782 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
783 Metablock* block = (Metablock*) p;
784 block->set_word_size(word_size);
785 block->set_prev(NULL);
786 block->set_next(NULL);
787
788 return block;
789 }
790
791 void BlockFreelist::return_block(MetaWord* p, size_t word_size) { 776 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
792 Metablock* free_chunk = initialize_free_chunk(p, word_size); 777 Metablock* free_chunk = ::new (p) Metablock(word_size);
793 if (dictionary() == NULL) { 778 if (dictionary() == NULL) {
794 _dictionary = new BlockTreeDictionary(); 779 _dictionary = new BlockTreeDictionary();
795 } 780 }
796 dictionary()->return_chunk(free_chunk); 781 dictionary()->return_chunk(free_chunk);
797 } 782 }
805 // Dark matter. Too small for dictionary. 790 // Dark matter. Too small for dictionary.
806 return NULL; 791 return NULL;
807 } 792 }
808 793
809 Metablock* free_block = 794 Metablock* free_block =
810 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly); 795 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
811 if (free_block == NULL) { 796 if (free_block == NULL) {
812 return NULL; 797 return NULL;
813 } 798 }
814 799
815 return (MetaWord*) free_block; 800 const size_t block_size = free_block->size();
801 if (block_size > WasteMultiplier * word_size) {
802 return_block((MetaWord*)free_block, block_size);
803 return NULL;
804 }
805
806 MetaWord* new_block = (MetaWord*)free_block;
807 assert(block_size >= word_size, "Incorrect size of block from freelist");
808 const size_t unused = block_size - word_size;
809 if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
810 return_block(new_block + word_size, unused);
811 }
812
813 return new_block;
816 } 814 }
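
The rewritten get_block() switches the dictionary lookup from exactly to atLeast, rejects blocks that fail the WasteMultiplier test, and returns any sufficiently large tail to the freelist. A standalone sketch of just the split step (min_tracked_words stands in for TreeChunk<Metablock, FreeList>::min_size()):

#include <cstddef>

struct SplitSketch { size_t used_words; size_t returned_words; };

SplitSketch split_block_sketch(size_t block_words, size_t request_words,
                               size_t min_tracked_words) {
  size_t unused = block_words - request_words;  // caller ensures block >= request
  if (unused >= min_tracked_words) {
    return {request_words, unused};  // tail goes back on the freelist
  }
  return {block_words, 0};           // tail too small to track; keep it all
}
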
817 815
818 void BlockFreelist::print_on(outputStream* st) const { 816 void BlockFreelist::print_on(outputStream* st) const {
819 if (dictionary() == NULL) { 817 if (dictionary() == NULL) {
820 return; 818 return;
851 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { 849 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
852 // Bottom of the new chunk 850 // Bottom of the new chunk
853 MetaWord* chunk_limit = top(); 851 MetaWord* chunk_limit = top();
854 assert(chunk_limit != NULL, "Not safe to call this method"); 852 assert(chunk_limit != NULL, "Not safe to call this method");
855 853
854 // The virtual spaces are always expanded by the
855 // commit granularity to enforce the following condition.
856 // Without this the is_available check will not work correctly.
857 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
858 "The committed memory doesn't match the expanded memory.");
859
856 if (!is_available(chunk_word_size)) { 860 if (!is_available(chunk_word_size)) {
857 if (TraceMetadataChunkAllocation) { 861 if (TraceMetadataChunkAllocation) {
858 tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); 862 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
859 // Dump some information about the virtual space that is nearly full 863 // Dump some information about the virtual space that is nearly full
860 print_on(tty); 864 print_on(gclog_or_tty);
861 } 865 }
862 return NULL; 866 return NULL;
863 } 867 }
864 868
865 // Take the space (bump top on the current virtual space). 869 // Take the space (bump top on the current virtual space).
870 return result; 874 return result;
871 } 875 }
872 876
873 877
874 // Expand the virtual space (commit more of the reserved space) 878 // Expand the virtual space (commit more of the reserved space)
875 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { 879 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
876 size_t bytes = words * BytesPerWord; 880 size_t min_bytes = min_words * BytesPerWord;
877 bool result = virtual_space()->expand_by(bytes, pre_touch); 881 size_t preferred_bytes = preferred_words * BytesPerWord;
878 if (TraceMetavirtualspaceAllocation && !result) { 882
879 gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " 883 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
880 "for byte size " SIZE_FORMAT, bytes); 884
881 virtual_space()->print(); 885 if (uncommitted < min_bytes) {
882 } 886 return false;
887 }
888
889 size_t commit = MIN2(preferred_bytes, uncommitted);
890 bool result = virtual_space()->expand_by(commit, false);
891
892 assert(result, "Failed to commit memory");
893
883 return result; 894 return result;
884 } 895 }
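
The rewritten expand_by() commits somewhere between min_words and preferred_words: it fails only when even the minimum no longer fits in the reservation, and otherwise commits as much of the preferred amount as fits. A standalone sketch of that policy, in bytes:

#include <algorithm>
#include <cstddef>

// Returns the number of bytes to commit, or 0 when even min_bytes won't fit.
size_t bytes_to_commit_sketch(size_t reserved_bytes, size_t committed_bytes,
                              size_t min_bytes, size_t preferred_bytes) {
  size_t uncommitted = reserved_bytes - committed_bytes;
  if (uncommitted < min_bytes) {
    return 0;
  }
  return std::min(preferred_bytes, uncommitted);
}
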
885
886 // Shrink the virtual space (commit more of the reserved space)
887 bool VirtualSpaceNode::shrink_by(size_t words) {
888 size_t bytes = words * BytesPerWord;
889 virtual_space()->shrink_by(bytes);
890 return true;
891 }
892
893 // Add another chunk to the chunk list.
894 896
895 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { 897 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
896 assert_lock_strong(SpaceManager::expand_lock()); 898 assert_lock_strong(SpaceManager::expand_lock());
897 Metachunk* result = take_from_committed(chunk_word_size); 899 Metachunk* result = take_from_committed(chunk_word_size);
898 if (result != NULL) { 900 if (result != NULL) {
899 inc_container_count(); 901 inc_container_count();
900 } 902 }
901 return result; 903 return result;
902 } 904 }
903 905
904 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
905 assert_lock_strong(SpaceManager::expand_lock());
906
907 Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
908
909 if (new_chunk == NULL) {
910 // Only a small part of the virtualspace is committed when first
911 // allocated so committing more here can be expected.
912 size_t page_size_words = os::vm_page_size() / BytesPerWord;
913 size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
914 page_size_words);
915 expand_by(aligned_expand_vs_by_words, false);
916 new_chunk = get_chunk_vs(chunk_word_size);
917 }
918 return new_chunk;
919 }
920
921 bool VirtualSpaceNode::initialize() { 906 bool VirtualSpaceNode::initialize() {
922 907
923 if (!_rs.is_reserved()) { 908 if (!_rs.is_reserved()) {
924 return false; 909 return false;
925 } 910 }
926 911
927 // An allocation out of this Virtualspace that is larger 912 // These are necessary restrictions to make sure that the virtual space always
928 // than an initial commit size can waste that initial committed 913 // grows in steps of Metaspace::commit_alignment(). If both base and size are
929 // space. 914 // aligned only the middle alignment of the VirtualSpace is used.
930 size_t committed_byte_size = 0; 915 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
931 bool result = virtual_space()->initialize(_rs, committed_byte_size); 916 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
917
918 // ReservedSpaces marked as special will have the entire memory
919 // pre-committed. Setting a committed size will make sure that
920 // committed_size and actual_committed_size agrees.
921 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
922
923 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
924 Metaspace::commit_alignment());
932 if (result) { 925 if (result) {
926 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
927 "Checking that the pre-committed memory was registered by the VirtualSpace");
928
933 set_top((MetaWord*)virtual_space()->low()); 929 set_top((MetaWord*)virtual_space()->low());
934 set_reserved(MemRegion((HeapWord*)_rs.base(), 930 set_reserved(MemRegion((HeapWord*)_rs.base(),
935 (HeapWord*)(_rs.base() + _rs.size()))); 931 (HeapWord*)(_rs.base() + _rs.size())));
936 932
937 assert(reserved()->start() == (HeapWord*) _rs.base(), 933 assert(reserved()->start() == (HeapWord*) _rs.base(),
975 VirtualSpaceNode* vsl = iter.get_next(); 971 VirtualSpaceNode* vsl = iter.get_next();
976 delete vsl; 972 delete vsl;
977 } 973 }
978 } 974 }
979 975
980 void VirtualSpaceList::inc_virtual_space_total(size_t v) { 976 void VirtualSpaceList::inc_reserved_words(size_t v) {
981 assert_lock_strong(SpaceManager::expand_lock()); 977 assert_lock_strong(SpaceManager::expand_lock());
982 _virtual_space_total = _virtual_space_total + v; 978 _reserved_words = _reserved_words + v;
983 } 979 }
984 void VirtualSpaceList::dec_virtual_space_total(size_t v) { 980 void VirtualSpaceList::dec_reserved_words(size_t v) {
985 assert_lock_strong(SpaceManager::expand_lock()); 981 assert_lock_strong(SpaceManager::expand_lock());
986 _virtual_space_total = _virtual_space_total - v; 982 _reserved_words = _reserved_words - v;
983 }
984
985 #define assert_committed_below_limit() \
986 assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
987 err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
988 " limit (MaxMetaspaceSize): " SIZE_FORMAT, \
989 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
990
991 void VirtualSpaceList::inc_committed_words(size_t v) {
992 assert_lock_strong(SpaceManager::expand_lock());
993 _committed_words = _committed_words + v;
994
995 assert_committed_below_limit();
996 }
997 void VirtualSpaceList::dec_committed_words(size_t v) {
998 assert_lock_strong(SpaceManager::expand_lock());
999 _committed_words = _committed_words - v;
1000
1001 assert_committed_below_limit();
987 } 1002 }
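
Every change to the committed counter is checked against MaxMetaspaceSize by the assert_committed_below_limit() macro above, turning the flag into a hard invariant in debug builds. A minimal sketch of that bookkeeping, with an assumed cap standing in for MaxMetaspaceSize:

#include <cassert>
#include <cstddef>

size_t g_committed_words_sketch = 0;
const size_t MaxMetaspaceWordsSketch = (256 * 1024 * 1024) / 8;  // assumed cap

void inc_committed_words_sketch(size_t v) {
  g_committed_words_sketch += v;
  assert(g_committed_words_sketch <= MaxMetaspaceWordsSketch &&
         "Too much committed memory");
}

void dec_committed_words_sketch(size_t v) {
  g_committed_words_sketch -= v;
}
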
988 1003
989 void VirtualSpaceList::inc_virtual_space_count() { 1004 void VirtualSpaceList::inc_virtual_space_count() {
990 assert_lock_strong(SpaceManager::expand_lock()); 1005 assert_lock_strong(SpaceManager::expand_lock());
991 _virtual_space_count++; 1006 _virtual_space_count++;
1003 } else { 1018 } else {
1004 humongous_dictionary()->remove_chunk(chunk); 1019 humongous_dictionary()->remove_chunk(chunk);
1005 } 1020 }
1006 1021
1007 // Chunk is being removed from the chunks free list. 1022 // Chunk is being removed from the chunks free list.
1008 dec_free_chunks_total(chunk->capacity_word_size()); 1023 dec_free_chunks_total(chunk->word_size());
1009 } 1024 }
1010 1025
1011 // Walk the list of VirtualSpaceNodes and delete 1026 // Walk the list of VirtualSpaceNodes and delete
1012 // nodes with a 0 container_count. Remove Metachunks in 1027 // nodes with a 0 container_count. Remove Metachunks in
1013 // the node from their respective freelists. 1028 // the node from their respective freelists.
1014 void VirtualSpaceList::purge() { 1029 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1015 assert_lock_strong(SpaceManager::expand_lock()); 1030 assert_lock_strong(SpaceManager::expand_lock());
1016 // Don't use a VirtualSpaceListIterator because this 1031 // Don't use a VirtualSpaceListIterator because this
1017 // list is being changed and a straightforward use of an iterator is not safe. 1032 // list is being changed and a straightforward use of an iterator is not safe.
1018 VirtualSpaceNode* purged_vsl = NULL; 1033 VirtualSpaceNode* purged_vsl = NULL;
1019 VirtualSpaceNode* prev_vsl = virtual_space_list(); 1034 VirtualSpaceNode* prev_vsl = virtual_space_list();
1024 // Don't free the current virtual space since it will likely 1039 // Don't free the current virtual space since it will likely
1025 // be needed soon. 1040 // be needed soon.
1026 if (vsl->container_count() == 0 && vsl != current_virtual_space()) { 1041 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1027 // Unlink it from the list 1042 // Unlink it from the list
1028 if (prev_vsl == vsl) { 1043 if (prev_vsl == vsl) {
1029 // This is the case of the current note being the first note. 1044 // This is the case of the current node being the first node.
1030 assert(vsl == virtual_space_list(), "Expected to be the first note"); 1045 assert(vsl == virtual_space_list(), "Expected to be the first node");
1031 set_virtual_space_list(vsl->next()); 1046 set_virtual_space_list(vsl->next());
1032 } else { 1047 } else {
1033 prev_vsl->set_next(vsl->next()); 1048 prev_vsl->set_next(vsl->next());
1034 } 1049 }
1035 1050
1036 vsl->purge(chunk_manager()); 1051 vsl->purge(chunk_manager);
1037 dec_virtual_space_total(vsl->reserved()->word_size()); 1052 dec_reserved_words(vsl->reserved_words());
1053 dec_committed_words(vsl->committed_words());
1038 dec_virtual_space_count(); 1054 dec_virtual_space_count();
1039 purged_vsl = vsl; 1055 purged_vsl = vsl;
1040 delete vsl; 1056 delete vsl;
1041 } else { 1057 } else {
1042 prev_vsl = vsl; 1058 prev_vsl = vsl;
1052 } 1068 }
1053 } 1069 }
1054 #endif 1070 #endif
1055 } 1071 }
1056 1072
1057 size_t VirtualSpaceList::used_words_sum() { 1073 void VirtualSpaceList::retire_current_virtual_space() {
1058 size_t allocated_by_vs = 0; 1074 assert_lock_strong(SpaceManager::expand_lock());
1059 VirtualSpaceListIterator iter(virtual_space_list()); 1075
1060 while (iter.repeat()) { 1076 VirtualSpaceNode* vsn = current_virtual_space();
1061 VirtualSpaceNode* vsl = iter.get_next(); 1077
1062 // Sum used region [bottom, top) in each virtualspace 1078 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1063 allocated_by_vs += vsl->used_words_in_vs(); 1079 Metaspace::chunk_manager_metadata();
1064 } 1080
1065 assert(allocated_by_vs >= chunk_manager()->free_chunks_total(), 1081 vsn->retire(cm);
1066 err_msg("Total in free chunks " SIZE_FORMAT 1082 }
1067 " greater than total from virtual_spaces " SIZE_FORMAT, 1083
1068 allocated_by_vs, chunk_manager()->free_chunks_total())); 1084 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1069 size_t used = 1085 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1070 allocated_by_vs - chunk_manager()->free_chunks_total(); 1086 ChunkIndex index = (ChunkIndex)i;
1071 return used; 1087 size_t chunk_size = chunk_manager->free_chunks(index)->size();
1072 } 1088
1073 1089 while (free_words_in_vs() >= chunk_size) {
1074 // Space available in all MetadataVirtualspaces allocated 1090 DEBUG_ONLY(verify_container_count();)
1075 // for metadata. This is the upper limit on the capacity 1091 Metachunk* chunk = get_chunk_vs(chunk_size);
1076 // of chunks allocated out of all the MetadataVirtualspaces. 1092 assert(chunk != NULL, "allocation should have been successful");
1077 size_t VirtualSpaceList::capacity_words_sum() { 1093
1078 size_t capacity = 0; 1094 chunk_manager->return_chunks(index, chunk);
1079 VirtualSpaceListIterator iter(virtual_space_list()); 1095 chunk_manager->inc_free_chunks_total(chunk_size);
1080 while (iter.repeat()) { 1096 DEBUG_ONLY(verify_container_count();)
1081 VirtualSpaceNode* vsl = iter.get_next(); 1097 }
1082 capacity += vsl->capacity_words_in_vs(); 1098 }
1083 } 1099 assert(free_words_in_vs() == 0, "should be empty now");
1084 return capacity; 1100 }
1085 } 1101
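
retire() can always carve the leftover committed space completely because each chunk size is a multiple of the smallest one, as the comment in the class declaration notes. A standalone walk over the non-class sizes (8K, 512, and 128 words, as defined above):

#include <cstddef>

// Counts how many chunks of each size a leftover of free_words yields.
void retire_sketch(size_t free_words, size_t out_counts[3]) {
  const size_t sizes[3] = {8 * 1024, 512, 128};  // Medium, Small, Specialized
  for (int i = 0; i < 3; i++) {
    out_counts[i] = free_words / sizes[i];  // carve the largest chunks first
    free_words   %= sizes[i];
  }
  // free_words ends at 0 whenever the leftover is a multiple of 128 words,
  // which holds because the space is committed in chunk-aligned granules.
}
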
1086 1102 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1087 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
1088 _is_class(false), 1103 _is_class(false),
1089 _virtual_space_list(NULL), 1104 _virtual_space_list(NULL),
1090 _current_virtual_space(NULL), 1105 _current_virtual_space(NULL),
1091 _virtual_space_total(0), 1106 _reserved_words(0),
1107 _committed_words(0),
1092 _virtual_space_count(0) { 1108 _virtual_space_count(0) {
1093 MutexLockerEx cl(SpaceManager::expand_lock(), 1109 MutexLockerEx cl(SpaceManager::expand_lock(),
1094 Mutex::_no_safepoint_check_flag); 1110 Mutex::_no_safepoint_check_flag);
1095 bool initialization_succeeded = grow_vs(word_size); 1111 create_new_virtual_space(word_size);
1096
1097 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
1098 _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
1099 _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
1100 assert(initialization_succeeded,
1101 " VirtualSpaceList initialization should not fail");
1102 } 1112 }
1103 1113
1104 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 1114 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1105 _is_class(true), 1115 _is_class(true),
1106 _virtual_space_list(NULL), 1116 _virtual_space_list(NULL),
1107 _current_virtual_space(NULL), 1117 _current_virtual_space(NULL),
1108 _virtual_space_total(0), 1118 _reserved_words(0),
1119 _committed_words(0),
1109 _virtual_space_count(0) { 1120 _virtual_space_count(0) {
1110 MutexLockerEx cl(SpaceManager::expand_lock(), 1121 MutexLockerEx cl(SpaceManager::expand_lock(),
1111 Mutex::_no_safepoint_check_flag); 1122 Mutex::_no_safepoint_check_flag);
1112 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 1123 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1113 bool succeeded = class_entry->initialize(); 1124 bool succeeded = class_entry->initialize();
1114 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); 1125 if (succeeded) {
1115 _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk); 1126 link_vs(class_entry);
1116 _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk); 1127 }
1117 assert(succeeded, " VirtualSpaceList initialization should not fail");
1118 link_vs(class_entry, rs.size()/BytesPerWord);
1119 } 1128 }
1120 1129
1121 size_t VirtualSpaceList::free_bytes() { 1130 size_t VirtualSpaceList::free_bytes() {
1122 return virtual_space_list()->free_words_in_vs() * BytesPerWord; 1131 return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1123 } 1132 }
1124 1133
1125 // Allocate another meta virtual space and add it to the list. 1134 // Allocate another meta virtual space and add it to the list.
1126 bool VirtualSpaceList::grow_vs(size_t vs_word_size) { 1135 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1127 assert_lock_strong(SpaceManager::expand_lock()); 1136 assert_lock_strong(SpaceManager::expand_lock());
1137
1138 if (is_class()) {
1139 assert(false, "We currently don't support more than one VirtualSpace for"
1140 " the compressed class space. The initialization of the"
1141 " CCS uses another code path and should not hit this path.");
1142 return false;
1143 }
1144
1128 if (vs_word_size == 0) { 1145 if (vs_word_size == 0) {
1146 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1129 return false; 1147 return false;
1130 } 1148 }
1149
1131 // Reserve the space 1150 // Reserve the space
1132 size_t vs_byte_size = vs_word_size * BytesPerWord; 1151 size_t vs_byte_size = vs_word_size * BytesPerWord;
1133 assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned"); 1152 assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1134 1153
1135 // Allocate the meta virtual space and initialize it. 1154 // Allocate the meta virtual space and initialize it.
1136 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); 1155 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1137 if (!new_entry->initialize()) { 1156 if (!new_entry->initialize()) {
1138 delete new_entry; 1157 delete new_entry;
1139 return false; 1158 return false;
1140 } else { 1159 } else {
1160 assert(new_entry->reserved_words() == vs_word_size,
1161 "Reserved memory size differs from requested memory size");
1141 // ensure lock-free iteration sees fully initialized node 1162 // ensure lock-free iteration sees fully initialized node
1142 OrderAccess::storestore(); 1163 OrderAccess::storestore();
1143 link_vs(new_entry, vs_word_size); 1164 link_vs(new_entry);
1144 return true; 1165 return true;
1145 } 1166 }
1146 } 1167 }
1147 1168
1148 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) { 1169 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1149 if (virtual_space_list() == NULL) { 1170 if (virtual_space_list() == NULL) {
1150 set_virtual_space_list(new_entry); 1171 set_virtual_space_list(new_entry);
1151 } else { 1172 } else {
1152 current_virtual_space()->set_next(new_entry); 1173 current_virtual_space()->set_next(new_entry);
1153 } 1174 }
1154 set_current_virtual_space(new_entry); 1175 set_current_virtual_space(new_entry);
1155 inc_virtual_space_total(vs_word_size); 1176 inc_reserved_words(new_entry->reserved_words());
1177 inc_committed_words(new_entry->committed_words());
1156 inc_virtual_space_count(); 1178 inc_virtual_space_count();
1157 #ifdef ASSERT 1179 #ifdef ASSERT
1158 new_entry->mangle(); 1180 new_entry->mangle();
1159 #endif 1181 #endif
1160 if (TraceMetavirtualspaceAllocation && Verbose) { 1182 if (TraceMetavirtualspaceAllocation && Verbose) {
1161 VirtualSpaceNode* vsl = current_virtual_space(); 1183 VirtualSpaceNode* vsl = current_virtual_space();
1162 vsl->print_on(tty); 1184 vsl->print_on(gclog_or_tty);
1163 } 1185 }
1186 }
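The storestore barrier in create_new_virtual_space() above exists because other threads may iterate the list without taking expand_lock(). A minimal sketch of that publication pattern, under the assumption that readers only follow next-pointers (names besides OrderAccess::storestore() are simplified from the surrounding code):

    // Writer side (holds expand_lock): fully initialize, fence, then publish.
    VirtualSpaceNode* node = new VirtualSpaceNode(vs_byte_size);
    node->initialize();          // all stores that build the node...
    OrderAccess::storestore();   // ...are ordered before the store below
    current->set_next(node);     // a lock-free reader that sees the link
                                 // is guaranteed to see a complete node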
1187
1188 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1189 size_t min_words,
1190 size_t preferred_words) {
1191 size_t before = node->committed_words();
1192
1193 bool result = node->expand_by(min_words, preferred_words);
1194
1195 size_t after = node->committed_words();
1196
1197 // after and before can be the same if the memory was pre-committed.
1198 assert(after >= before, "Inconsistency");
1199 inc_committed_words(after - before);
1200
1201 return result;
1202 }
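A note on the accounting above: committed_words() is sampled before and after the expansion so the list-level counter records exactly what the node committed, whatever expand_by() decided internally. Illustrative cases (numbers assumed):

    // node fully pre-committed:    after == before  -> inc_committed_words(0)
    // 64 K words newly committed:  after - before == 64 K
    //                                               -> inc_committed_words(64 K)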
1203
1204 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1205 assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
1206 assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1207 assert(min_words <= preferred_words, "Invalid arguments");
1208
1209 if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1210 return false;
1211 }
1212
1213 size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1214 if (allowed_expansion_words < min_words) {
1215 return false;
1216 }
1217
1218 size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1219
1220 // Commit more memory from the current virtual space.
1221 bool vs_expanded = expand_node_by(current_virtual_space(),
1222 min_words,
1223 max_expansion_words);
1224 if (vs_expanded) {
1225 return true;
1226 }
1227 retire_current_virtual_space();
1228
1229 // Get another virtual space.
1230 size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1231 grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1232
1233 if (create_new_virtual_space(grow_vs_words)) {
1234 if (current_virtual_space()->is_pre_committed()) {
1235 // The memory was pre-committed, so we are done here.
1236 assert(min_words <= current_virtual_space()->committed_words(),
1237 "The new VirtualSpace was pre-committed, so it"
1238 "should be large enough to fit the alloc request.");
1239 return true;
1240 }
1241
1242 return expand_node_by(current_virtual_space(),
1243 min_words,
1244 max_expansion_words);
1245 }
1246
1247 return false;
1164 } 1248 }
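A worked pass through expand_by(), with illustrative values (none of these numbers come from this file):

    //   commit_alignment_words()         = 64 K words
    //   min_words                        = 64 K   (the chunk that must fit)
    //   preferred_words                  = 512 K  (a medium-chunk bunch)
    //   MetaspaceGC::allowed_expansion() = 256 K
    // => max_expansion_words = MIN2(512 K, 256 K) = 256 K
    //    expand_node_by(current_virtual_space(), 64 K, 256 K) succeeds if at
    //    least 64 K can be committed; otherwise the node is retired and a new
    //    node of at least VirtualSpaceSize words (rounded up to the reserve
    //    alignment) is created.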
1165 1249
1166 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, 1250 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1167 size_t grow_chunks_by_words, 1251 size_t grow_chunks_by_words,
1168 size_t medium_chunk_bunch) { 1252 size_t medium_chunk_bunch) {
1169 1253
1170 // Get a chunk from the chunk freelist 1254 // Allocate a chunk out of the current virtual space.
1171 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); 1255 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1172 1256
1173 if (next != NULL) { 1257 if (next != NULL) {
1174 next->container()->inc_container_count(); 1258 return next;
1175 } else { 1259 }
1176 // Allocate a chunk out of the current virtual space. 1260
1261 // The expand amount is currently only determined by the requested sizes
1262 // and not how much committed memory is left in the current virtual space.
1263
1264 size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1265 size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
1266 if (min_word_size >= preferred_word_size) {
1267 // Can happen when humongous chunks are allocated.
1268 preferred_word_size = min_word_size;
1269 }
1270
1271 bool expanded = expand_by(min_word_size, preferred_word_size);
1272 if (expanded) {
1177 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 1273 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1178 } 1274 assert(next != NULL, "The allocation was expected to succeed after the expansion");
1179 1275 }
1180 if (next == NULL) { 1276
1181 // Not enough room in current virtual space. Try to commit 1277 return next;
1182 // more space.
1183 size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
1184 grow_chunks_by_words);
1185 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1186 size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
1187 page_size_words);
1188 bool vs_expanded =
1189 current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
1190 if (!vs_expanded) {
1191 // Should the capacity of the metaspaces be expanded for
1192 // this allocation? If it's the virtual space for classes and is
1193 // being used for CompressedHeaders, don't allocate a new virtualspace.
1194 if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
1195 // Get another virtual space.
1196 size_t grow_vs_words =
1197 MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
1198 if (grow_vs(grow_vs_words)) {
1199 // Got it. It's on the list now. Get a chunk from it.
1200 next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
1201 }
1202 } else {
1203 // Allocation will fail and induce a GC
1204 if (TraceMetadataChunkAllocation && Verbose) {
1205 gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
1206 " Fail instead of expand the metaspace");
1207 }
1208 }
1209 } else {
1210 // The virtual space expanded, get a new chunk
1211 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1212 assert(next != NULL, "Just expanded, should succeed");
1213 }
1214 }
1215
1216 assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1217 "New chunk is still on some list");
1218 return next;
1219 }
1220
1221 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1222 size_t chunk_bunch) {
1223 // Get a chunk from the chunk freelist
1224 Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1225 chunk_word_size,
1226 chunk_bunch);
1227 return new_chunk;
1228 } 1278 }
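A worked example of the size rounding in the new get_new_chunk(), again with illustrative values:

    //   commit_alignment_words() = 64 K
    //   grow_chunks_by_words     = 4 K  (e.g. a ClassMediumChunk)
    //   medium_chunk_bunch       = 32 K
    // => min_word_size = preferred_word_size = 64 K
    // For a humongous request of 100 K words instead:
    //   min_word_size = 128 K > preferred_word_size (64 K)
    // so preferred_word_size is raised to 128 K before expand_by() runs.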
1229 1279
1230 void VirtualSpaceList::print_on(outputStream* st) const { 1280 void VirtualSpaceList::print_on(outputStream* st) const {
1231 if (TraceMetadataChunkAllocation && Verbose) { 1281 if (TraceMetadataChunkAllocation && Verbose) {
1232 VirtualSpaceListIterator iter(virtual_space_list()); 1282 VirtualSpaceListIterator iter(virtual_space_list());
1271 // the HWM. 1321 // the HWM.
1272 1322
1273 // Calculate the amount to increase the high water mark (HWM). 1323 // Calculate the amount to increase the high water mark (HWM).
1274 // Increase by a minimum amount (MinMetaspaceExpansion) so that 1324 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1275 // another expansion is not requested too soon. If that is not 1325 // another expansion is not requested too soon. If that is not
1276 // enough to satisfy the allocation (i.e. big enough for a word_size 1326 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1277 // allocation), increase by MaxMetaspaceExpansion. If that is still 1327 // If that is still not enough, expand by the size of the allocation
1278 // not enough, expand by the size of the allocation (word_size) plus 1328 // plus some.
1279 // some. 1329 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1280 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { 1330 size_t min_delta = MinMetaspaceExpansion;
1281 size_t before_inc = MetaspaceGC::capacity_until_GC(); 1331 size_t max_delta = MaxMetaspaceExpansion;
1282 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; 1332 size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1283 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord; 1333
1284 size_t page_size_words = os::vm_page_size() / BytesPerWord; 1334 if (delta <= min_delta) {
1285 size_t size_delta_words = align_size_up(word_size, page_size_words); 1335 delta = min_delta;
1286 size_t delta_words = MAX2(size_delta_words, min_delta_words); 1336 } else if (delta <= max_delta) {
1287 if (delta_words > min_delta_words) {
1288 // Don't want to hit the high water mark on the next 1337 // Don't want to hit the high water mark on the next
1289 // allocation so make the delta greater than just enough 1338 // allocation so make the delta greater than just enough
1290 // for this allocation. 1339 // for this allocation.
1291 delta_words = MAX2(delta_words, max_delta_words); 1340 delta = max_delta;
1292 if (delta_words > max_delta_words) { 1341 } else {
1293 // This allocation is large but the next ones are probably not 1342 // This allocation is large but the next ones are probably not
1294 // so increase by the minimum. 1343 // so increase by the minimum.
1295 delta_words = delta_words + min_delta_words; 1344 delta = delta + min_delta;
1296 } 1345 }
1297 } 1346
1298 return delta_words; 1347 assert_is_size_aligned(delta, Metaspace::commit_alignment());
1299 } 1348
1300 1349 return delta;
1301 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { 1350 }
1302 1351
1303 // If the user wants a limit, impose one. 1352 size_t MetaspaceGC::capacity_until_GC() {
1304 // The reason for someone using this flag is to limit reserved space. So 1353 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1305 // for non-class virtual space, compare against virtual spaces that are reserved. 1354 assert(value >= MetaspaceSize, "Not initialized properly?");
1306 // For class virtual space, we only compare against the committed space, not 1355 return value;
1307 // reserved space, because this is a larger space prereserved for compressed 1356 }
1308 // class pointers. 1357
1309 if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { 1358 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
1310 size_t real_allocated = Metaspace::space_list()->virtual_space_total() + 1359 assert_is_size_aligned(v, Metaspace::commit_alignment());
1311 MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 1360
1312 if (real_allocated >= MaxMetaspaceSize) { 1361 return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
1362 }
1363
1364 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1365 assert_is_size_aligned(v, Metaspace::commit_alignment());
1366
1367 return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1368 }
1369
1370 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1371 // Check if the compressed class space is full.
1372 if (is_class && Metaspace::using_class_space()) {
1373 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1374 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1313 return false; 1375 return false;
1314 } 1376 }
1315 } 1377 }
1316 1378
1317 // Class virtual space should always be expanded. Call GC for the other 1379 // Check if the user has imposed a limit on the metaspace memory.
1318 // metadata virtual space. 1380 size_t committed_bytes = MetaspaceAux::committed_bytes();
1319 if (Metaspace::using_class_space() && 1381 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1320 (vsl == Metaspace::class_space_list())) return true; 1382 return false;
1321 1383 }
1322 // If this is part of an allocation after a GC, expand 1384
1323 // unconditionally. 1385 return true;
1324 if (MetaspaceGC::expand_after_GC()) { 1386 }
1325 return true; 1387
1326 } 1388 size_t MetaspaceGC::allowed_expansion() {
1327 1389 size_t committed_bytes = MetaspaceAux::committed_bytes();
1328 1390
1329 // If the capacity is below the minimum capacity, allow the 1391 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
1330 // expansion. Also set the high-water-mark (capacity_until_GC) 1392
1331 // to that minimum capacity so that a GC will not be induced 1393 // Always grant expansion if we are initializing the JVM,
1332 // until that minimum capacity is exceeded. 1394 // or if the GC_locker is preventing GCs.
1333 size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 1395 if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
1334 size_t metaspace_size_bytes = MetaspaceSize; 1396 return left_until_max / BytesPerWord;
1335 if (committed_capacity_bytes < metaspace_size_bytes || 1397 }
1336 capacity_until_GC() == 0) { 1398
1337 set_capacity_until_GC(metaspace_size_bytes); 1399 size_t capacity_until_gc = capacity_until_GC();
1338 return true; 1400
1339 } else { 1401 if (capacity_until_gc <= committed_bytes) {
1340 if (committed_capacity_bytes < capacity_until_GC()) { 1402 return 0;
1341 return true; 1403 }
1342 } else { 1404
1343 if (TraceMetadataChunkAllocation && Verbose) { 1405 size_t left_until_GC = capacity_until_gc - committed_bytes;
1344 gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT 1406 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1345 " capacity_until_GC " SIZE_FORMAT 1407
1346 " allocated_capacity_bytes " SIZE_FORMAT, 1408 return left_to_commit / BytesPerWord;
1347 word_size, 1409 }
1348 capacity_until_GC(),
1349 MetaspaceAux::allocated_capacity_bytes());
1350 }
1351 return false;
1352 }
1353 }
1354 }
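A worked example for the new delta_capacity_until_GC(), assuming illustrative settings of MinMetaspaceExpansion = 256 K, MaxMetaspaceExpansion = 4 M, and a commit alignment of 64 K:

    //   bytes = 10 K -> delta = 64 K (aligned) <= min -> grow HWM by 256 K
    //   bytes = 1 M  -> delta = 1 M  <= max           -> grow HWM by 4 M
    //   bytes = 8 M  -> delta = 8 M  >  max           -> grow HWM by 8 M + 256 K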
1355
1356
1357 1410
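Similarly for allowed_expansion(), with illustrative numbers:

    //   committed_bytes     = 20 M,  MaxMetaspaceSize = 100 M
    //   capacity_until_GC() = 24 M
    // => left_until_max = 80 M, left_until_GC = 4 M
    // => allowed_expansion() = MIN2(4 M, 80 M) / BytesPerWord = 4 M in words
    // During VM initialization, or while the GC_locker blocks a needed GC,
    // the full 80 M (in words) is granted instead.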
1358 void MetaspaceGC::compute_new_size() { 1411 void MetaspaceGC::compute_new_size() {
1359 assert(_shrink_factor <= 100, "invalid shrink factor"); 1412 assert(_shrink_factor <= 100, "invalid shrink factor");
1360 uint current_shrink_factor = _shrink_factor; 1413 uint current_shrink_factor = _shrink_factor;
1361 _shrink_factor = 0; 1414 _shrink_factor = 0;
1362 1415
1363 // Until a faster way of calculating the "used" quantity is implemented,
1364 // use "capacity".
1365 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); 1416 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1366 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 1417 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1367 1418
1368 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; 1419 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1369 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1420 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1391 size_t shrink_bytes = 0; 1442 size_t shrink_bytes = 0;
1392 if (capacity_until_GC < minimum_desired_capacity) { 1443 if (capacity_until_GC < minimum_desired_capacity) {
1393 // If we have less capacity below the metaspace HWM, then 1444 // If we have less capacity below the metaspace HWM, then
1394 // increment the HWM. 1445 // increment the HWM.
1395 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1446 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1447 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1396 // Don't expand unless it's significant 1448 // Don't expand unless it's significant
1397 if (expand_bytes >= MinMetaspaceExpansion) { 1449 if (expand_bytes >= MinMetaspaceExpansion) {
1398 MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); 1450 MetaspaceGC::inc_capacity_until_GC(expand_bytes);
1399 } 1451 }
1400 if (PrintGCDetails && Verbose) { 1452 if (PrintGCDetails && Verbose) {
1401 size_t new_capacity_until_GC = capacity_until_GC; 1453 size_t new_capacity_until_GC = capacity_until_GC;
1402 gclog_or_tty->print_cr(" expanding:" 1454 gclog_or_tty->print_cr(" expanding:"
1403 " minimum_desired_capacity: %6.1fKB" 1455 " minimum_desired_capacity: %6.1fKB"
1450 // we'd just have to grow the heap up again for the next phase. So we 1502 // we'd just have to grow the heap up again for the next phase. So we
1451 // damp the shrinking: 0% on the first call, 10% on the second call, 40% 1503 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1452 // on the third call, and 100% by the fourth call. But if we recompute 1504 // on the third call, and 100% by the fourth call. But if we recompute
1453 // size without shrinking, it goes back to 0%. 1505 // size without shrinking, it goes back to 0%.
1454 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1506 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1507
1508 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1509
1455 assert(shrink_bytes <= max_shrink_bytes, 1510 assert(shrink_bytes <= max_shrink_bytes,
1456 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1511 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1457 shrink_bytes, max_shrink_bytes)); 1512 shrink_bytes, max_shrink_bytes));
1458 if (current_shrink_factor == 0) { 1513 if (current_shrink_factor == 0) {
1459 _shrink_factor = 10; 1514 _shrink_factor = 10;
1481 } 1536 }
1482 1537
1483 // Don't shrink unless it's significant 1538 // Don't shrink unless it's significant
1484 if (shrink_bytes >= MinMetaspaceExpansion && 1539 if (shrink_bytes >= MinMetaspaceExpansion &&
1485 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1540 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1486 MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); 1541 MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1487 } 1542 }
1488 } 1543 }
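The shrink damping at the end of compute_new_size() releases excess capacity gradually rather than all at once. With an illustrative 40 M of shrinkable capacity over four consecutive shrinking calls:

    //   call 1: shrink_factor 0   -> release 0     (factor becomes 10)
    //   call 2: shrink_factor 10  -> release 4 M   (factor becomes 40)
    //   call 3: shrink_factor 40  -> release 16 M  (factor becomes 100)
    //   call 4: shrink_factor 100 -> release 40 M
    // each amount then aligned down to Metaspace::commit_alignment().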
1489 1544
1490 // Metadebug methods 1545 // Metadebug methods
1491
1492 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1493 size_t chunk_word_size){
1494 #ifdef ASSERT
1495 VirtualSpaceList* vsl = sm->vs_list();
1496 if (MetaDataDeallocateALot &&
1497 Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1498 Metadebug::reset_deallocate_chunk_a_lot_count();
1499 for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1500 Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1501 if (dummy_chunk == NULL) {
1502 break;
1503 }
1504 vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1505
1506 if (TraceMetadataChunkAllocation && Verbose) {
1507 gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1508 sm->sum_count_in_chunks_in_use());
1509 dummy_chunk->print_on(gclog_or_tty);
1510 gclog_or_tty->print_cr(" Free chunks total %d count %d",
1511 vsl->chunk_manager()->free_chunks_total(),
1512 vsl->chunk_manager()->free_chunks_count());
1513 }
1514 }
1515 } else {
1516 Metadebug::inc_deallocate_chunk_a_lot_count();
1517 }
1518 #endif
1519 }
1520
1521 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1522 size_t raw_word_size){
1523 #ifdef ASSERT
1524 if (MetaDataDeallocateALot &&
1525 Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1526 Metadebug::set_deallocate_block_a_lot_count(0);
1527 for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1528 MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1529 if (dummy_block == 0) {
1530 break;
1531 }
1532 sm->deallocate(dummy_block, raw_word_size);
1533 }
1534 } else {
1535 Metadebug::inc_deallocate_block_a_lot_count();
1536 }
1537 #endif
1538 }
1539 1546
1540 void Metadebug::init_allocation_fail_alot_count() { 1547 void Metadebug::init_allocation_fail_alot_count() {
1541 if (MetadataAllocationFailALot) { 1548 if (MetadataAllocationFailALot) {
1542 _allocation_fail_alot_count = 1549 _allocation_fail_alot_count =
1543 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1550 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1563 } 1570 }
1564 #endif 1571 #endif
1565 1572
1566 // ChunkManager methods 1573 // ChunkManager methods
1567 1574
1568 size_t ChunkManager::free_chunks_total() { 1575 size_t ChunkManager::free_chunks_total_words() {
1569 return _free_chunks_total; 1576 return _free_chunks_total;
1570 } 1577 }
1571 1578
1572 size_t ChunkManager::free_chunks_total_in_bytes() { 1579 size_t ChunkManager::free_chunks_total_bytes() {
1573 return free_chunks_total() * BytesPerWord; 1580 return free_chunks_total_words() * BytesPerWord;
1574 } 1581 }
1575 1582
1576 size_t ChunkManager::free_chunks_count() { 1583 size_t ChunkManager::free_chunks_count() {
1577 #ifdef ASSERT 1584 #ifdef ASSERT
1578 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1585 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1678 ChunkIndex index = list_index(word_size); 1685 ChunkIndex index = list_index(word_size);
1679 assert(index < HumongousIndex, "No humongous list"); 1686 assert(index < HumongousIndex, "No humongous list");
1680 return free_chunks(index); 1687 return free_chunks(index);
1681 } 1688 }
1682 1689
1683 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1684 assert_lock_strong(SpaceManager::expand_lock());
1685 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1686 chunk->set_next(free_list->head());
1687 free_list->set_head(chunk);
1688 // chunk is being returned to the chunk free list
1689 inc_free_chunks_total(chunk->capacity_word_size());
1690 slow_locked_verify();
1691 }
1692
1693 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1694 // The deallocation of a chunk originates in the freelist
1695 // manangement code for a Metaspace and does not hold the
1696 // lock.
1697 assert(chunk != NULL, "Deallocating NULL");
1698 assert_lock_strong(SpaceManager::expand_lock());
1699 slow_locked_verify();
1700 if (TraceMetadataChunkAllocation) {
1701 tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1702 PTR_FORMAT " size " SIZE_FORMAT,
1703 chunk, chunk->word_size());
1704 }
1705 free_chunks_put(chunk);
1706 }
1707
1708 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1690 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1709 assert_lock_strong(SpaceManager::expand_lock()); 1691 assert_lock_strong(SpaceManager::expand_lock());
1710 1692
1711 slow_locked_verify(); 1693 slow_locked_verify();
1712 1694
1714 if (list_index(word_size) != HumongousIndex) { 1696 if (list_index(word_size) != HumongousIndex) {
1715 ChunkList* free_list = find_free_chunks_list(word_size); 1697 ChunkList* free_list = find_free_chunks_list(word_size);
1716 assert(free_list != NULL, "Sanity check"); 1698 assert(free_list != NULL, "Sanity check");
1717 1699
1718 chunk = free_list->head(); 1700 chunk = free_list->head();
1719 debug_only(Metachunk* debug_head = chunk;)
1720 1701
1721 if (chunk == NULL) { 1702 if (chunk == NULL) {
1722 return NULL; 1703 return NULL;
1723 } 1704 }
1724 1705
1725 // Remove the chunk as the head of the list. 1706 // Remove the chunk as the head of the list.
1726 free_list->remove_chunk(chunk); 1707 free_list->remove_chunk(chunk);
1727 1708
1728 // Chunk is being removed from the chunks free list.
1729 dec_free_chunks_total(chunk->capacity_word_size());
1730
1731 if (TraceMetadataChunkAllocation && Verbose) { 1709 if (TraceMetadataChunkAllocation && Verbose) {
1732 tty->print_cr("ChunkManager::free_chunks_get: free_list " 1710 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1733 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 1711 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1734 free_list, chunk, chunk->word_size()); 1712 free_list, chunk, chunk->word_size());
1735 } 1713 }
1736 } else { 1714 } else {
1737 chunk = humongous_dictionary()->get_chunk( 1715 chunk = humongous_dictionary()->get_chunk(
1738 word_size, 1716 word_size,
1739 FreeBlockDictionary<Metachunk>::atLeast); 1717 FreeBlockDictionary<Metachunk>::atLeast);
1740 1718
1741 if (chunk != NULL) { 1719 if (chunk == NULL) {
1742 if (TraceMetadataHumongousAllocation) {
1743 size_t waste = chunk->word_size() - word_size;
1744 tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1745 " for requested size " SIZE_FORMAT
1746 " waste " SIZE_FORMAT,
1747 chunk->word_size(), word_size, waste);
1748 }
1749 // Chunk is being removed from the chunks free list.
1750 dec_free_chunks_total(chunk->capacity_word_size());
1751 } else {
1752 return NULL; 1720 return NULL;
1753 } 1721 }
1754 } 1722
1723 if (TraceMetadataHumongousAllocation) {
1724 size_t waste = chunk->word_size() - word_size;
1725 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1726 SIZE_FORMAT " for requested size " SIZE_FORMAT
1727 " waste " SIZE_FORMAT,
1728 chunk->word_size(), word_size, waste);
1729 }
1730 }
1731
1732 // Chunk is being removed from the chunks free list.
1733 dec_free_chunks_total(chunk->word_size());
1755 1734
1756 // Remove it from the links to this freelist 1735 // Remove it from the links to this freelist
1757 chunk->set_next(NULL); 1736 chunk->set_next(NULL);
1758 chunk->set_prev(NULL); 1737 chunk->set_prev(NULL);
1759 #ifdef ASSERT 1738 #ifdef ASSERT
1760 // Chunk is no longer on any freelist. Setting to false makes container_count_slow() 1739 // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1761 // work. 1740 // work.
1762 chunk->set_is_free(false); 1741 chunk->set_is_tagged_free(false);
1763 #endif 1742 #endif
1743 chunk->container()->inc_container_count();
1744
1764 slow_locked_verify(); 1745 slow_locked_verify();
1765 return chunk; 1746 return chunk;
1766 } 1747 }
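Worth noting in free_chunks_get(): two counters move together when a chunk leaves the free pool. A sketch of the invariant, restating the code above:

    //   dec_free_chunks_total(chunk->word_size());  // global free pool shrinks
    //   chunk->container()->inc_container_count();  // owning node gains a live chunk
    // A VirtualSpaceNode whose container count falls back to zero holds no
    // live chunks, which is what makes an empty node recognizable later.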
1767 1748
1768 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { 1749 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1784 ChunkList* list = find_free_chunks_list(word_size); 1765 ChunkList* list = find_free_chunks_list(word_size);
1785 list_count = list->count(); 1766 list_count = list->count();
1786 } else { 1767 } else {
1787 list_count = humongous_dictionary()->total_count(); 1768 list_count = humongous_dictionary()->total_count();
1788 } 1769 }
1789 tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " 1770 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1790 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", 1771 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1791 this, chunk, chunk->word_size(), list_count); 1772 this, chunk, chunk->word_size(), list_count);
1792 locked_print_free_chunks(tty); 1773 locked_print_free_chunks(gclog_or_tty);
1793 } 1774 }
1794 1775
1795 return chunk; 1776 return chunk;
1796 } 1777 }
1797 1778
1798 void ChunkManager::print_on(outputStream* out) { 1779 void ChunkManager::print_on(outputStream* out) const {
1799 if (PrintFLSStatistics != 0) { 1780 if (PrintFLSStatistics != 0) {
1800 humongous_dictionary()->report_statistics(); 1781 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1801 } 1782 }
1802 } 1783 }
1803 1784
1804 // SpaceManager methods 1785 // SpaceManager methods
1805 1786
1887 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1868 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1888 size_t sum = 0; 1869 size_t sum = 0;
1889 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1870 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1890 Metachunk* chunk = chunks_in_use(i); 1871 Metachunk* chunk = chunks_in_use(i);
1891 while (chunk != NULL) { 1872 while (chunk != NULL) {
1892 sum += chunk->capacity_word_size(); 1873 sum += chunk->word_size();
1893 chunk = chunk->next(); 1874 chunk = chunk->next();
1894 } 1875 }
1895 } 1876 }
1896 return sum; 1877 return sum;
1897 } 1878 }
1942 } else { 1923 } else {
1943 st->print_cr(""); 1924 st->print_cr("");
1944 } 1925 }
1945 } 1926 }
1946 1927
1947 vs_list()->chunk_manager()->locked_print_free_chunks(st); 1928 chunk_manager()->locked_print_free_chunks(st);
1948 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st); 1929 chunk_manager()->locked_print_sum_free_chunks(st);
1949 } 1930 }
1950 1931
1951 size_t SpaceManager::calc_chunk_size(size_t word_size) { 1932 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1952 1933
1953 // Decide between a small chunk and a medium chunk. Up to 1934 // Decide between a small chunk and a medium chunk. Up to
1963 } 1944 }
1964 } else { 1945 } else {
1965 chunk_word_size = medium_chunk_size(); 1946 chunk_word_size = medium_chunk_size();
1966 } 1947 }
1967 1948
1968 // Might still need a humongous chunk. Enforce an 1949 // Might still need a humongous chunk. Enforce
1969 // eight word granularity to facilitate reuse (some 1950 // humongous allocation sizes to be aligned up to
1970 // wastage but better chance of reuse). 1951 // the smallest chunk size.
1971 size_t if_humongous_sized_chunk = 1952 size_t if_humongous_sized_chunk =
1972 align_size_up(word_size + Metachunk::overhead(), 1953 align_size_up(word_size + Metachunk::overhead(),
1973 HumongousChunkGranularity); 1954 smallest_chunk_size());
1974 chunk_word_size = 1955 chunk_word_size =
1975 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 1956 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1976 1957
1977 assert(!SpaceManager::is_humongous(word_size) || 1958 assert(!SpaceManager::is_humongous(word_size) ||
1978 chunk_word_size == if_humongous_sized_chunk, 1959 chunk_word_size == if_humongous_sized_chunk,
1989 Metachunk::overhead()); 1970 Metachunk::overhead());
1990 } 1971 }
1991 return chunk_word_size; 1972 return chunk_word_size;
1992 } 1973 }
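A worked humongous sizing example for calc_chunk_size(), with illustrative values of smallest_chunk_size() = 128 words and Metachunk::overhead() = 8 words:

    //   word_size = 5000
    //   if_humongous_sized_chunk = align_size_up(5000 + 8, 128) = 5120
    // so a 5120-word chunk is carved out: at most one smallest-chunk of
    // slack, and the rounded size keeps humongous chunks reusable once
    // they return to the dictionary.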
1993 1974
1975 void SpaceManager::track_metaspace_memory_usage() {
1976 if (is_init_completed()) {
1977 if (is_class()) {
1978 MemoryService::track_compressed_class_memory_usage();
1979 }
1980 MemoryService::track_metaspace_memory_usage();
1981 }
1982 }
1983
1994 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 1984 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1995 assert(vs_list()->current_virtual_space() != NULL, 1985 assert(vs_list()->current_virtual_space() != NULL,
1996 "Should have been set"); 1986 "Should have been set");
1997 assert(current_chunk() == NULL || 1987 assert(current_chunk() == NULL ||
1998 current_chunk()->allocate(word_size) == NULL, 1988 current_chunk()->allocate(word_size) == NULL,
2014 2004
2015 // Get another chunk out of the virtual space 2005 // Get another chunk out of the virtual space
2016 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2006 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2017 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2007 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2018 2008
2009 MetaWord* mem = NULL;
2010
2019 // If a chunk was available, add it to the in-use chunk list 2011 // If a chunk was available, add it to the in-use chunk list
2020 // and do an allocation from it. 2012 // and do an allocation from it.
2021 if (next != NULL) { 2013 if (next != NULL) {
2022 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2023 // Add to this manager's list of chunks in use. 2014 // Add to this manager's list of chunks in use.
2024 add_chunk(next, false); 2015 add_chunk(next, false);
2025 return next->allocate(word_size); 2016 mem = next->allocate(word_size);
2026 } 2017 }
2027 return NULL; 2018
2019 // Track metaspace memory usage statistic.
2020 track_metaspace_memory_usage();
2021
2022 return mem;
2028 } 2023 }
2029 2024
2030 void SpaceManager::print_on(outputStream* st) const { 2025 void SpaceManager::print_on(outputStream* st) const {
2031 2026
2032 for (ChunkIndex i = ZeroIndex; 2027 for (ChunkIndex i = ZeroIndex;
2047 block_freelists()->total_size()); 2042 block_freelists()->total_size());
2048 } 2043 }
2049 } 2044 }
2050 2045
2051 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2046 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2052 Mutex* lock, 2047 Mutex* lock) :
2053 VirtualSpaceList* vs_list) :
2054 _vs_list(vs_list),
2055 _mdtype(mdtype), 2048 _mdtype(mdtype),
2056 _allocated_blocks_words(0), 2049 _allocated_blocks_words(0),
2057 _allocated_chunks_words(0), 2050 _allocated_chunks_words(0),
2058 _allocated_chunks_count(0), 2051 _allocated_chunks_count(0),
2059 _lock(lock) 2052 _lock(lock)
2119 assert(cur->container() != NULL, "Container should have been set"); 2112 assert(cur->container() != NULL, "Container should have been set");
2120 cur->container()->dec_container_count(); 2113 cur->container()->dec_container_count();
2121 // Capture the next link before it is changed 2114 // Capture the next link before it is changed
2122 // by the call to return_chunk_at_head(); 2115 // by the call to return_chunk_at_head();
2123 Metachunk* next = cur->next(); 2116 Metachunk* next = cur->next();
2124 cur->set_is_free(true); 2117 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2125 list->return_chunk_at_head(cur); 2118 list->return_chunk_at_head(cur);
2126 cur = next; 2119 cur = next;
2127 } 2120 }
2128 } 2121 }
2129 2122
2135 sum_capacity_in_chunks_in_use(), allocated_chunks_words())); 2128 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2136 2129
2137 MutexLockerEx fcl(SpaceManager::expand_lock(), 2130 MutexLockerEx fcl(SpaceManager::expand_lock(),
2138 Mutex::_no_safepoint_check_flag); 2131 Mutex::_no_safepoint_check_flag);
2139 2132
2140 ChunkManager* chunk_manager = vs_list()->chunk_manager(); 2133 chunk_manager()->slow_locked_verify();
2141
2142 chunk_manager->slow_locked_verify();
2143 2134
2144 dec_total_from_size_metrics(); 2135 dec_total_from_size_metrics();
2145 2136
2146 if (TraceMetadataChunkAllocation && Verbose) { 2137 if (TraceMetadataChunkAllocation && Verbose) {
2147 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this); 2138 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2151 // Do not mangle freed Metachunks. The chunk size inside Metachunks 2142 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2152 // is used during the freeing of VirtualSpaceNodes. 2143 // is used during the freeing of VirtualSpaceNodes.
2153 2144
2154 // Have to update before the chunks_in_use lists are emptied 2145 // Have to update before the chunks_in_use lists are emptied
2155 // below. 2146 // below.
2156 chunk_manager->inc_free_chunks_total(allocated_chunks_words(), 2147 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2157 sum_count_in_chunks_in_use()); 2148 sum_count_in_chunks_in_use());
2158 2149
2159 // Add all the chunks in use by this space manager 2150 // Add all the chunks in use by this space manager
2160 // to the global list of free chunks. 2151 // to the global list of free chunks.
2161 2152
2162 // Follow each list of chunks-in-use and add them to the 2153 // Follow each list of chunks-in-use and add them to the
2167 gclog_or_tty->print_cr("returned %d %s chunks to freelist", 2158 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2168 sum_count_in_chunks_in_use(i), 2159 sum_count_in_chunks_in_use(i),
2169 chunk_size_name(i)); 2160 chunk_size_name(i));
2170 } 2161 }
2171 Metachunk* chunks = chunks_in_use(i); 2162 Metachunk* chunks = chunks_in_use(i);
2172 chunk_manager->return_chunks(i, chunks); 2163 chunk_manager()->return_chunks(i, chunks);
2173 set_chunks_in_use(i, NULL); 2164 set_chunks_in_use(i, NULL);
2174 if (TraceMetadataChunkAllocation && Verbose) { 2165 if (TraceMetadataChunkAllocation && Verbose) {
2175 gclog_or_tty->print_cr("updated freelist count %d %s", 2166 gclog_or_tty->print_cr("updated freelist count %d %s",
2176 chunk_manager->free_chunks(i)->count(), 2167 chunk_manager()->free_chunks(i)->count(),
2177 chunk_size_name(i)); 2168 chunk_size_name(i));
2178 } 2169 }
2179 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); 2170 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2180 } 2171 }
2181 2172
2193 // Humongous chunks are never the current chunk. 2184 // Humongous chunks are never the current chunk.
2194 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2185 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2195 2186
2196 while (humongous_chunks != NULL) { 2187 while (humongous_chunks != NULL) {
2197 #ifdef ASSERT 2188 #ifdef ASSERT
2198 humongous_chunks->set_is_free(true); 2189 humongous_chunks->set_is_tagged_free(true);
2199 #endif 2190 #endif
2200 if (TraceMetadataChunkAllocation && Verbose) { 2191 if (TraceMetadataChunkAllocation && Verbose) {
2201 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", 2192 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2202 humongous_chunks, 2193 humongous_chunks,
2203 humongous_chunks->word_size()); 2194 humongous_chunks->word_size());
2204 } 2195 }
2205 assert(humongous_chunks->word_size() == (size_t) 2196 assert(humongous_chunks->word_size() == (size_t)
2206 align_size_up(humongous_chunks->word_size(), 2197 align_size_up(humongous_chunks->word_size(),
2207 HumongousChunkGranularity), 2198 smallest_chunk_size()),
2208 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT 2199 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2209 " granularity %d", 2200 " granularity %d",
2210 humongous_chunks->word_size(), HumongousChunkGranularity)); 2201 humongous_chunks->word_size(), smallest_chunk_size()));
2211 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2202 Metachunk* next_humongous_chunks = humongous_chunks->next();
2212 humongous_chunks->container()->dec_container_count(); 2203 humongous_chunks->container()->dec_container_count();
2213 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks); 2204 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2214 humongous_chunks = next_humongous_chunks; 2205 humongous_chunks = next_humongous_chunks;
2215 } 2206 }
2216 if (TraceMetadataChunkAllocation && Verbose) { 2207 if (TraceMetadataChunkAllocation && Verbose) {
2217 gclog_or_tty->print_cr(""); 2208 gclog_or_tty->print_cr("");
2218 gclog_or_tty->print_cr("updated dictionary count %d %s", 2209 gclog_or_tty->print_cr("updated dictionary count %d %s",
2219 chunk_manager->humongous_dictionary()->total_count(), 2210 chunk_manager()->humongous_dictionary()->total_count(),
2220 chunk_size_name(HumongousIndex)); 2211 chunk_size_name(HumongousIndex));
2221 } 2212 }
2222 chunk_manager->slow_locked_verify(); 2213 chunk_manager()->slow_locked_verify();
2223 } 2214 }
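The destructor's hand-back above reduces to a short loop; a simplified sketch of the code it restates (not a replacement for it):

    for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
      chunk_manager()->return_chunks(i, chunks_in_use(i)); // whole list at once
      set_chunks_in_use(i, NULL);
    }
    // humongous chunks are then walked one by one and returned to the
    // dictionary, keyed by their exact word size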
2224 2215
2225 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2216 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2226 switch (index) { 2217 switch (index) {
2227 case SpecializedIndex: 2218 case SpecializedIndex:
2276 // Find the correct list and set the current 2267 // Find the correct list and set the current
2277 // chunk for that list. 2268 // chunk for that list.
2278 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); 2269 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2279 2270
2280 if (index != HumongousIndex) { 2271 if (index != HumongousIndex) {
2272 retire_current_chunk();
2281 set_current_chunk(new_chunk); 2273 set_current_chunk(new_chunk);
2282 new_chunk->set_next(chunks_in_use(index)); 2274 new_chunk->set_next(chunks_in_use(index));
2283 set_chunks_in_use(index, new_chunk); 2275 set_chunks_in_use(index, new_chunk);
2284 } else { 2276 } else {
2285 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2277 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2305 assert(new_chunk->is_empty(), "Not ready for reuse"); 2297 assert(new_chunk->is_empty(), "Not ready for reuse");
2306 if (TraceMetadataChunkAllocation && Verbose) { 2298 if (TraceMetadataChunkAllocation && Verbose) {
2307 gclog_or_tty->print("SpaceManager::add_chunk: %d) ", 2299 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2308 sum_count_in_chunks_in_use()); 2300 sum_count_in_chunks_in_use());
2309 new_chunk->print_on(gclog_or_tty); 2301 new_chunk->print_on(gclog_or_tty);
2310 if (vs_list() != NULL) { 2302 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2311 vs_list()->chunk_manager()->locked_print_free_chunks(tty); 2303 }
2304 }
2305
2306 void SpaceManager::retire_current_chunk() {
2307 if (current_chunk() != NULL) {
2308 size_t remaining_words = current_chunk()->free_word_size();
2309 if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
2310 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2311 inc_used_metrics(remaining_words);
2312 } 2312 }
2313 } 2313 }
2314 } 2314 }
2315 2315
2316 Metachunk* SpaceManager::get_new_chunk(size_t word_size, 2316 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2317 size_t grow_chunks_by_words) { 2317 size_t grow_chunks_by_words) {
2318 2318 // Get a chunk from the chunk freelist
2319 Metachunk* next = vs_list()->get_new_chunk(word_size, 2319 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2320 grow_chunks_by_words, 2320
2321 medium_chunk_bunch()); 2321 if (next == NULL) {
2322 2322 next = vs_list()->get_new_chunk(word_size,
2323 if (TraceMetadataHumongousAllocation && 2323 grow_chunks_by_words,
2324 medium_chunk_bunch());
2325 }
2326
2327 if (TraceMetadataHumongousAllocation && next != NULL &&
2324 SpaceManager::is_humongous(next->word_size())) { 2328 SpaceManager::is_humongous(next->word_size())) {
2325 gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT, 2329 gclog_or_tty->print_cr(" new humongous chunk word size "
2326 next->word_size()); 2330 PTR_FORMAT, next->word_size());
2327 } 2331 }
2328 2332
2329 return next; 2333 return next;
2330 } 2334 }
2331 2335
2344 p = fl->get_block(raw_word_size); 2348 p = fl->get_block(raw_word_size);
2345 } 2349 }
2346 if (p == NULL) { 2350 if (p == NULL) {
2347 p = allocate_work(raw_word_size); 2351 p = allocate_work(raw_word_size);
2348 } 2352 }
2349 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2350 2353
2351 return p; 2354 return p;
2352 } 2355 }
2353 2356
2354 // Returns the address of space allocated for "word_size". 2357 // Returns the address of space allocated for "word_size".
2369 if (DumpSharedSpaces) { 2372 if (DumpSharedSpaces) {
2370 assert(current_chunk() != NULL, "should never happen"); 2373 assert(current_chunk() != NULL, "should never happen");
2371 inc_used_metrics(word_size); 2374 inc_used_metrics(word_size);
2372 return current_chunk()->allocate(word_size); // caller handles null result 2375 return current_chunk()->allocate(word_size); // caller handles null result
2373 } 2376 }
2377
2374 if (current_chunk() != NULL) { 2378 if (current_chunk() != NULL) {
2375 result = current_chunk()->allocate(word_size); 2379 result = current_chunk()->allocate(word_size);
2376 } 2380 }
2377 2381
2378 if (result == NULL) { 2382 if (result == NULL) {
2379 result = grow_and_allocate(word_size); 2383 result = grow_and_allocate(word_size);
2380 } 2384 }
2381 if (result != 0) { 2385
2386 if (result != NULL) {
2382 inc_used_metrics(word_size); 2387 inc_used_metrics(word_size);
2383 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2388 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2384 "Head of the list is being allocated"); 2389 "Head of the list is being allocated");
2385 } 2390 }
2386 2391
2439 for (Metachunk* curr = chunks_in_use(index); 2444 for (Metachunk* curr = chunks_in_use(index);
2440 curr != NULL; 2445 curr != NULL;
2441 curr = curr->next()) { 2446 curr = curr->next()) {
2442 out->print("%d) ", i++); 2447 out->print("%d) ", i++);
2443 curr->print_on(out); 2448 curr->print_on(out);
2444 if (TraceMetadataChunkAllocation && Verbose) {
2445 block_freelists()->print_on(out);
2446 }
2447 curr_total += curr->word_size(); 2449 curr_total += curr->word_size();
2448 used += curr->used_word_size(); 2450 used += curr->used_word_size();
2449 capacity += curr->capacity_word_size(); 2451 capacity += curr->word_size();
2450 waste += curr->free_word_size() + curr->overhead(); 2452 waste += curr->free_word_size() + curr->overhead();
2451 } 2453 }
2454 }
2455
2456 if (TraceMetadataChunkAllocation && Verbose) {
2457 block_freelists()->print_on(out);
2452 } 2458 }
2453 2459
2454 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2460 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2455 // Free space isn't wasted. 2461 // Free space isn't wasted.
2456 waste -= free; 2462 waste -= free;
2536 } 2542 }
2537 } 2543 }
2538 return used * BytesPerWord; 2544 return used * BytesPerWord;
2539 } 2545 }
2540 2546
2541 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) { 2547 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2542 size_t free = 0; 2548 size_t free = 0;
2543 ClassLoaderDataGraphMetaspaceIterator iter; 2549 ClassLoaderDataGraphMetaspaceIterator iter;
2544 while (iter.repeat()) { 2550 while (iter.repeat()) {
2545 Metaspace* msp = iter.get_next(); 2551 Metaspace* msp = iter.get_next();
2546 if (msp != NULL) { 2552 if (msp != NULL) {
2547 free += msp->free_words(mdtype); 2553 free += msp->free_words_slow(mdtype);
2548 } 2554 }
2549 } 2555 }
2550 return free * BytesPerWord; 2556 return free * BytesPerWord;
2551 } 2557 }
2552 2558
2565 } 2571 }
2566 } 2572 }
2567 return capacity * BytesPerWord; 2573 return capacity * BytesPerWord;
2568 } 2574 }
2569 2575
2570 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) { 2576 size_t MetaspaceAux::capacity_bytes_slow() {
2577 #ifdef PRODUCT
2578 // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2579 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2580 #endif
2581 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2582 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2583 assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2584 err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2585 " class_capacity + non_class_capacity " SIZE_FORMAT
2586 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2587 allocated_capacity_bytes(), class_capacity + non_class_capacity,
2588 class_capacity, non_class_capacity));
2589
2590 return class_capacity + non_class_capacity;
2591 }
2592
2593 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2571 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2594 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2572 return list == NULL ? 0 : list->virtual_space_total(); 2595 return list == NULL ? 0 : list->reserved_bytes();
2573 } 2596 }
2574 2597
2575 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); } 2598 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2576
2577 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2578 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2599 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2579 if (list == NULL) { 2600 return list == NULL ? 0 : list->committed_bytes();
2601 }
2602
2603 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2604
2605 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2606 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2607 if (chunk_manager == NULL) {
2580 return 0; 2608 return 0;
2581 } 2609 }
2582 ChunkManager* chunk = list->chunk_manager(); 2610 chunk_manager->slow_verify();
2583 chunk->slow_verify(); 2611 return chunk_manager->free_chunks_total_words();
2584 return chunk->free_chunks_total(); 2612 }
2585 } 2613
2586 2614 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2587 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) { 2615 return free_chunks_total_words(mdtype) * BytesPerWord;
2588 return free_chunks_total(mdtype) * BytesPerWord; 2616 }
2589 } 2617
2590 2618 size_t MetaspaceAux::free_chunks_total_words() {
2591 size_t MetaspaceAux::free_chunks_total() { 2619 return free_chunks_total_words(Metaspace::ClassType) +
2592 return free_chunks_total(Metaspace::ClassType) + 2620 free_chunks_total_words(Metaspace::NonClassType);
2593 free_chunks_total(Metaspace::NonClassType); 2621 }
2594 } 2622
2595 2623 size_t MetaspaceAux::free_chunks_total_bytes() {
2596 size_t MetaspaceAux::free_chunks_total_in_bytes() { 2624 return free_chunks_total_words() * BytesPerWord;
2597 return free_chunks_total() * BytesPerWord;
2598 } 2625 }
2599 2626
2600 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2627 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2601 gclog_or_tty->print(", [Metaspace:"); 2628 gclog_or_tty->print(", [Metaspace:");
2602 if (PrintGCDetails && Verbose) { 2629 if (PrintGCDetails && Verbose) {
2603 gclog_or_tty->print(" " SIZE_FORMAT 2630 gclog_or_tty->print(" " SIZE_FORMAT
2604 "->" SIZE_FORMAT 2631 "->" SIZE_FORMAT
2605 "(" SIZE_FORMAT ")", 2632 "(" SIZE_FORMAT ")",
2606 prev_metadata_used, 2633 prev_metadata_used,
2607 allocated_used_bytes(), 2634 allocated_used_bytes(),
2608 reserved_in_bytes()); 2635 reserved_bytes());
2609 } else { 2636 } else {
2610 gclog_or_tty->print(" " SIZE_FORMAT "K" 2637 gclog_or_tty->print(" " SIZE_FORMAT "K"
2611 "->" SIZE_FORMAT "K" 2638 "->" SIZE_FORMAT "K"
2612 "(" SIZE_FORMAT "K)", 2639 "(" SIZE_FORMAT "K)",
2613 prev_metadata_used / K, 2640 prev_metadata_used/K,
2614 allocated_used_bytes() / K, 2641 allocated_used_bytes()/K,
2615 reserved_in_bytes()/ K); 2642 reserved_bytes()/K);
2616 } 2643 }
2617 2644
2618 gclog_or_tty->print("]"); 2645 gclog_or_tty->print("]");
2619 } 2646 }
2620 2647
2621 // This is printed when PrintGCDetails 2648 // This is printed when PrintGCDetails
2622 void MetaspaceAux::print_on(outputStream* out) { 2649 void MetaspaceAux::print_on(outputStream* out) {
2623 Metaspace::MetadataType nct = Metaspace::NonClassType; 2650 Metaspace::MetadataType nct = Metaspace::NonClassType;
2624 2651
2625 out->print_cr(" Metaspace total " 2652 out->print_cr(" Metaspace "
2626 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2653 "used " SIZE_FORMAT "K, "
2627 " reserved " SIZE_FORMAT "K", 2654 "capacity " SIZE_FORMAT "K, "
2628 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K); 2655 "committed " SIZE_FORMAT "K, "
2629 2656 "reserved " SIZE_FORMAT "K",
2630 out->print_cr(" data space " 2657 allocated_used_bytes()/K,
2631 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2658 allocated_capacity_bytes()/K,
2632 " reserved " SIZE_FORMAT "K", 2659 committed_bytes()/K,
2633 allocated_capacity_bytes(nct)/K, 2660 reserved_bytes()/K);
2634 allocated_used_bytes(nct)/K, 2661
2635 reserved_in_bytes(nct)/K);
2636 if (Metaspace::using_class_space()) { 2662 if (Metaspace::using_class_space()) {
2637 Metaspace::MetadataType ct = Metaspace::ClassType; 2663 Metaspace::MetadataType ct = Metaspace::ClassType;
2638 out->print_cr(" class space " 2664 out->print_cr(" class space "
2639 SIZE_FORMAT "K, used " SIZE_FORMAT "K," 2665 "used " SIZE_FORMAT "K, "
2640 " reserved " SIZE_FORMAT "K", 2666 "capacity " SIZE_FORMAT "K, "
2667 "committed " SIZE_FORMAT "K, "
2668 "reserved " SIZE_FORMAT "K",
2669 allocated_used_bytes(ct)/K,
2641 allocated_capacity_bytes(ct)/K, 2670 allocated_capacity_bytes(ct)/K,
2642 allocated_used_bytes(ct)/K, 2671 committed_bytes(ct)/K,
2643 reserved_in_bytes(ct)/K); 2672 reserved_bytes(ct)/K);
2644 } 2673 }
2645 } 2674 }
2646 2675
2647 // Print information for class space and data space separately. 2676 // Print information for class space and data space separately.
2648 // This is almost the same as above. 2677 // This is almost the same as above.
2649 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2678 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2650 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype); 2679 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2651 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2680 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2652 size_t used_bytes = used_bytes_slow(mdtype); 2681 size_t used_bytes = used_bytes_slow(mdtype);
2653 size_t free_bytes = free_in_bytes(mdtype); 2682 size_t free_bytes = free_bytes_slow(mdtype);
2654 size_t used_and_free = used_bytes + free_bytes + 2683 size_t used_and_free = used_bytes + free_bytes +
2655 free_chunks_capacity_bytes; 2684 free_chunks_capacity_bytes;
2656 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2685 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2657 "K + unused in chunks " SIZE_FORMAT "K + " 2686 "K + unused in chunks " SIZE_FORMAT "K + "
2658 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2687 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2730 out->print("class space: "); print_on(out, Metaspace::ClassType); 2759 out->print("class space: "); print_on(out, Metaspace::ClassType);
2731 print_waste(out); 2760 print_waste(out);
2732 } 2761 }
2733 2762
2734 void MetaspaceAux::verify_free_chunks() { 2763 void MetaspaceAux::verify_free_chunks() {
2735 Metaspace::space_list()->chunk_manager()->verify(); 2764 Metaspace::chunk_manager_metadata()->verify();
2736 if (Metaspace::using_class_space()) { 2765 if (Metaspace::using_class_space()) {
2737 Metaspace::class_space_list()->chunk_manager()->verify(); 2766 Metaspace::chunk_manager_class()->verify();
2738 } 2767 }
2739 } 2768 }
2740 2769
2741 void MetaspaceAux::verify_capacity() { 2770 void MetaspaceAux::verify_capacity() {
2742 #ifdef ASSERT 2771 #ifdef ASSERT
2789 // Metaspace methods 2818 // Metaspace methods
2790 2819
2791 size_t Metaspace::_first_chunk_word_size = 0; 2820 size_t Metaspace::_first_chunk_word_size = 0;
2792 size_t Metaspace::_first_class_chunk_word_size = 0; 2821 size_t Metaspace::_first_class_chunk_word_size = 0;
2793 2822
2823 size_t Metaspace::_commit_alignment = 0;
2824 size_t Metaspace::_reserve_alignment = 0;
2825
2794 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2826 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2795 initialize(lock, type); 2827 initialize(lock, type);
2796 } 2828 }
2797 2829
2798 Metaspace::~Metaspace() { 2830 Metaspace::~Metaspace() {
2803 } 2835 }
2804 2836
2805 VirtualSpaceList* Metaspace::_space_list = NULL; 2837 VirtualSpaceList* Metaspace::_space_list = NULL;
2806 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2838 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2807 2839
2840 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2841 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2842
2808 #define VIRTUALSPACEMULTIPLIER 2 2843 #define VIRTUALSPACEMULTIPLIER 2
2809 2844
2810 #ifdef _LP64 2845 #ifdef _LP64
2846 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2847
2811 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2848 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2812 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2849 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2813 // narrow_klass_base is the lower of the metaspace base and the cds base 2850 // narrow_klass_base is the lower of the metaspace base and the cds base
2814 // (if cds is enabled). The narrow_klass_shift depends on the distance 2851 // (if cds is enabled). The narrow_klass_shift depends on the distance
2815 // between the lower base and higher address. 2852 // between the lower base and higher address.
2816 address lower_base; 2853 address lower_base;
2817 address higher_address; 2854 address higher_address;
2818 if (UseSharedSpaces) { 2855 if (UseSharedSpaces) {
2819 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2856 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2820 (address)(metaspace_base + class_metaspace_size())); 2857 (address)(metaspace_base + compressed_class_space_size()));
2821 lower_base = MIN2(metaspace_base, cds_base); 2858 lower_base = MIN2(metaspace_base, cds_base);
2822 } else { 2859 } else {
2823 higher_address = metaspace_base + class_metaspace_size(); 2860 higher_address = metaspace_base + compressed_class_space_size();
2824 lower_base = metaspace_base; 2861 lower_base = metaspace_base;
2825 } 2862
2863 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2864 // If compressed class space fits in lower 32G, we don't need a base.
2865 if (higher_address <= (address)klass_encoding_max) {
2866 lower_base = 0; // effectively lower base is zero.
2867 }
2868 }
2869
2826 Universe::set_narrow_klass_base(lower_base); 2870 Universe::set_narrow_klass_base(lower_base);
2827 if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) { 2871
2872 if ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax) {
2828 Universe::set_narrow_klass_shift(0); 2873 Universe::set_narrow_klass_shift(0);
2829 } else { 2874 } else {
2830 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2875 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2831 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2876 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2832 } 2877 }
2834 2879
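The base/shift choice above reduces to two range checks. A minimal standalone sketch (illustrative C++, not HotSpot code; the zero-base reset is applied unconditionally here, whereas the real code applies it only in the non-CDS branch):

    #include <cstdint>
    #include <cstdio>

    const int      LogKlassAlignmentInBytes = 3;                        // 8-byte Klass alignment
    const uint64_t UnscaledClassSpaceMax    = uint64_t(UINT32_MAX) + 1; // 4G

    static void choose_encoding(uint64_t lower_base, uint64_t higher_address) {
      // With the shift applied, a zero base can reach 4G << 3 = 32G.
      uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
      if (higher_address <= klass_encoding_max) {
        lower_base = 0;  // everything is reachable from a zero base
      }
      int shift = ((higher_address - lower_base) < UnscaledClassSpaceMax)
                      ? 0 : LogKlassAlignmentInBytes;
      printf("base=0x%llx shift=%d\n", (unsigned long long)lower_base, shift);
    }

    int main() {
      const uint64_t G = 1024ULL * 1024 * 1024;
      choose_encoding(2 * G, 3 * G);   // ends below 4G: base 0, shift 0
      choose_encoding(0, 8 * G);       // below 32G but spans >= 4G: base 0, shift 3
      choose_encoding(40 * G, 41 * G); // above 32G: keep the base; span < 4G, shift 0
    }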
2835 // Return TRUE if the specified metaspace_base and cds_base are close enough 2880 // Return TRUE if the specified metaspace_base and cds_base are close enough
2836 // to work with compressed klass pointers. 2881 // to work with compressed klass pointers.
2837 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2882 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2838 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2883 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2839 assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs"); 2884 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2840 address lower_base = MIN2((address)metaspace_base, cds_base); 2885 address lower_base = MIN2((address)metaspace_base, cds_base);
2841 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2886 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2842 (address)(metaspace_base + class_metaspace_size())); 2887 (address)(metaspace_base + compressed_class_space_size()));
2843 return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); 2888 return ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax);
2844 } 2889 }
2845 2890
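can_use_cds_with_metaspace_addr() is the same reachability test applied before reserving: the archive and the class space together must span less than 4G. A self-contained sketch with plain integers standing in for the addresses and sizes:

    #include <cstdint>
    #include <cstdio>

    const uint64_t UnscaledClassSpaceMax = uint64_t(UINT32_MAX) + 1;  // 4G

    static bool can_use_cds_with_metaspace_addr(uint64_t metaspace_base, uint64_t cds_base,
                                                uint64_t cds_size, uint64_t class_space_size) {
      uint64_t lower  = metaspace_base < cds_base ? metaspace_base : cds_base;
      uint64_t higher = cds_base + cds_size > metaspace_base + class_space_size
                      ? cds_base + cds_size : metaspace_base + class_space_size;
      return higher - lower < UnscaledClassSpaceMax;
    }

    int main() {
      const uint64_t G = 1024ULL * 1024 * 1024;
      printf("%d\n", can_use_cds_with_metaspace_addr(2*G, 1*G, G/4, G)); // 1: spans 2G
      printf("%d\n", can_use_cds_with_metaspace_addr(6*G, 1*G, G/4, G)); // 0: spans 6G
    }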
2846 // Try to allocate the metaspace at the requested addr. 2891 // Try to allocate the metaspace at the requested addr.
2847 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 2892 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2848 assert(using_class_space(), "called improperly"); 2893 assert(using_class_space(), "called improperly");
2849 assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs"); 2894 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2850 assert(class_metaspace_size() < KlassEncodingMetaspaceMax, 2895 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2851 "Metaspace size is too big"); 2896 "Metaspace size is too big");
2852 2897 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2853 ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), 2898 assert_is_ptr_aligned(cds_base, _reserve_alignment);
2854 os::vm_allocation_granularity(), 2899 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2855 false, requested_addr, 0); 2900
2901 // Don't use large pages for the class space.
2902 bool large_pages = false;
2903
2904 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2905 _reserve_alignment,
2906 large_pages,
2907 requested_addr, 0);
2856 if (!metaspace_rs.is_reserved()) { 2908 if (!metaspace_rs.is_reserved()) {
2857 if (UseSharedSpaces) { 2909 if (UseSharedSpaces) {
2910 size_t increment = align_size_up(1*G, _reserve_alignment);
2911
2858 // Keep trying to allocate the metaspace, increasing the requested_addr 2912 // Keep trying to allocate the metaspace, increasing the requested_addr
2859 // by 1GB each time, until we reach an address that will no longer allow 2913 // by 1GB each time, until we reach an address that will no longer allow
2860 // use of CDS with compressed klass pointers. 2914 // use of CDS with compressed klass pointers.
2861 char *addr = requested_addr; 2915 char *addr = requested_addr;
2862 while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && 2916 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2863 can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { 2917 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2864 addr = addr + 1*G; 2918 addr = addr + increment;
2865 metaspace_rs = ReservedSpace(class_metaspace_size(), 2919 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2866 os::vm_allocation_granularity(), false, addr, 0); 2920 _reserve_alignment, large_pages, addr, 0);
2867 } 2921 }
2868 } 2922 }
2869 2923
2870 // If no successful allocation then try to allocate the space anywhere. If 2924 // If no successful allocation then try to allocate the space anywhere. If
2871 // that fails then OOM doom. At this point we cannot try allocating the 2925 // that fails then OOM doom. At this point we cannot try allocating the
2872 // metaspace as if UseCompressedKlassPointers is off because too much 2926 // metaspace as if UseCompressedClassPointers is off because too much
2873 // initialization has happened that depends on UseCompressedKlassPointers. 2927 // initialization has happened that depends on UseCompressedClassPointers.
2874 // So, UseCompressedKlassPointers cannot be turned off at this point. 2928 // So, UseCompressedClassPointers cannot be turned off at this point.
2875 if (!metaspace_rs.is_reserved()) { 2929 if (!metaspace_rs.is_reserved()) {
2876 metaspace_rs = ReservedSpace(class_metaspace_size(), 2930 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2877 os::vm_allocation_granularity(), false); 2931 _reserve_alignment, large_pages);
2878 if (!metaspace_rs.is_reserved()) { 2932 if (!metaspace_rs.is_reserved()) {
2856 2879 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 2933 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
2880 class_metaspace_size())); 2934 compressed_class_space_size()));
2881 } 2935 }
2882 } 2936 }
2883 } 2937 }
2884 2938
2885 // If we got here then the metaspace got allocated. 2939 // If we got here then the metaspace got allocated.
2897 initialize_class_space(metaspace_rs); 2951 initialize_class_space(metaspace_rs);
2898 2952
2899 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 2953 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2900 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 2954 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
2901 Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 2955 Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2902 gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 2956 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2903 class_metaspace_size(), metaspace_rs.base(), requested_addr); 2957 compressed_class_space_size(), metaspace_rs.base(), requested_addr);
2904 } 2958 }
2905 } 2959 }
2906 2960
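The retry loop above probes successively higher addresses, stepping by an aligned 1G increment, until the reservation succeeds, the address arithmetic would overflow, or the address leaves the encodable range. A toy version, with a hypothetical try_reserve() standing in for ReservedSpace and a flat 32G bound standing in for can_use_cds_with_metaspace_addr():

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in: pretend reservations only succeed at or above 6G,
    // which forces a couple of retries.
    static bool try_reserve(uint64_t addr) { return addr >= 6ULL * 1024 * 1024 * 1024; }

    int main() {
      const uint64_t G = 1024ULL * 1024 * 1024;
      uint64_t increment = G;          // align_size_up(1*G, _reserve_alignment) in the real code
      uint64_t addr = 4 * G;           // initial requested_addr
      bool reserved = try_reserve(addr);
      // Bump the request while the address does not wrap around and the new
      // address would still allow compressed klass encoding (here: below 32G).
      while (!reserved && (addr + increment > addr) && (addr + increment < 32 * G)) {
        addr += increment;
        reserved = try_reserve(addr);
      }
      printf("%s at 0x%llx\n", reserved ? "reserved" : "fall back to anywhere",
             (unsigned long long)addr);
    }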
2907 // For UseCompressedKlassPointers the class space is reserved above the top of 2961 // For UseCompressedClassPointers the class space is reserved above the top of
2908 // the Java heap. The argument passed in is at the base of the compressed space. 2962 // the Java heap. The argument passed in is at the base of the compressed space.
2909 void Metaspace::initialize_class_space(ReservedSpace rs) { 2963 void Metaspace::initialize_class_space(ReservedSpace rs) {
2910 // The reserved space size may be bigger because of alignment, esp with UseLargePages 2964 // The reserved space size may be bigger because of alignment, esp with UseLargePages
2911 assert(rs.size() >= ClassMetaspaceSize, 2965 assert(rs.size() >= CompressedClassSpaceSize,
2912 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize)); 2966 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
2913 assert(using_class_space(), "Must be using class space"); 2967 assert(using_class_space(), "Must be using class space");
2914 _class_space_list = new VirtualSpaceList(rs); 2968 _class_space_list = new VirtualSpaceList(rs);
2969 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
2970
2971 if (!_class_space_list->initialization_succeeded()) {
2972 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
2973 }
2915 } 2974 }
2916 2975
2917 #endif 2976 #endif
2977
2978 // Align down. If aligning down results in 0, return 'alignment'.
2979 static size_t restricted_align_down(size_t size, size_t alignment) {
2980 return MAX2(alignment, align_size_down_(size, alignment));
2981 }
2982
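restricted_align_down() above rounds a size down to a power-of-two alignment but never below the alignment itself. A standalone version with the MAX2 spelled out:

    #include <cstddef>
    #include <cstdio>

    static size_t align_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);    // power-of-two alignment assumed, as in HotSpot
    }

    static size_t restricted_align_down(size_t size, size_t alignment) {
      size_t aligned = align_down(size, alignment);
      return aligned > alignment ? aligned : alignment;  // MAX2(alignment, aligned)
    }

    int main() {
      printf("%zu\n", restricted_align_down(10 * 1024 * 1024 + 123, 4096)); // rounds down
      printf("%zu\n", restricted_align_down(100, 4096));                    // clamps up to 4096
    }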
2983 void Metaspace::ergo_initialize() {
2984 if (DumpSharedSpaces) {
2985 // Using large pages when dumping the shared archive is currently not implemented.
2986 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
2987 }
2988
2989 size_t page_size = os::vm_page_size();
2990 if (UseLargePages && UseLargePagesInMetaspace) {
2991 page_size = os::large_page_size();
2992 }
2993
2994 _commit_alignment = page_size;
2995 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
2996
2997 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
2998 // erase whether MaxMetaspaceSize was set on the command line or not.
2999 // This information is needed later to conform to the specification of the
3000 // java.lang.management.MemoryUsage API.
3001 //
3002 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3003 // globals.hpp to the aligned value, but this is not possible, since the
3004 // alignment depends on other flags being parsed.
3005 MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
3006
3007 if (MetaspaceSize > MaxMetaspaceSize) {
3008 MetaspaceSize = MaxMetaspaceSize;
3009 }
3010
3011 MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
3012
3013 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3014
3015 if (MetaspaceSize < 256*K) {
3016 vm_exit_during_initialization("Too small initial Metaspace size");
3017 }
3018
3019 MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
3020 MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
3021
3022 CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
3023 set_compressed_class_space_size(CompressedClassSpaceSize);
3024 }
2918 3025
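The ergonomics above derive the commit alignment from the page size and the reserve alignment from the larger of page size and allocation granularity, then round each size flag down (never to zero) so later reservations line up. A sketch with plain variables standing in for the VM flags and os:: queries (the constants are illustrative):

    #include <cstddef>
    #include <cstdio>

    static size_t restricted_align_down(size_t size, size_t alignment) {
      size_t aligned = size & ~(alignment - 1);   // power-of-two alignment assumed
      return aligned > alignment ? aligned : alignment;
    }

    int main() {
      const size_t page_size   = 4 * 1024;        // os::vm_page_size() stand-in
      const size_t granularity = 64 * 1024;       // os::vm_allocation_granularity() stand-in
      size_t commit_alignment  = page_size;
      size_t reserve_alignment = granularity > page_size ? granularity : page_size;

      size_t max_metaspace_size = restricted_align_down(~size_t(0), reserve_alignment);
      size_t metaspace_size     = 21 * 1024 * 1024;   // default MetaspaceSize stand-in
      if (metaspace_size > max_metaspace_size) metaspace_size = max_metaspace_size;
      metaspace_size = restricted_align_down(metaspace_size, commit_alignment);

      printf("commit=%zu reserve=%zu MetaspaceSize=%zu\n",
             commit_alignment, reserve_alignment, metaspace_size);
    }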
2919 void Metaspace::global_initialize() { 3026 void Metaspace::global_initialize() {
2920 // Initialize the alignment for shared spaces. 3027 // Initialize the alignment for shared spaces.
2921 int max_alignment = os::vm_page_size(); 3028 int max_alignment = os::vm_page_size();
2922 size_t cds_total = 0; 3029 size_t cds_total = 0;
2923 3030
2924 set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
2925 os::vm_allocation_granularity()));
2926
2927 MetaspaceShared::set_max_alignment(max_alignment); 3031 MetaspaceShared::set_max_alignment(max_alignment);
2928 3032
2929 if (DumpSharedSpaces) { 3033 if (DumpSharedSpaces) {
2930 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3034 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2931 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3035 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2932 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3036 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2933 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3037 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2934 3038
2935 // Initialize with the sum of the shared space sizes. The read-only 3039 // Initialize with the sum of the shared space sizes. The read-only
2936 // and read-write metaspace chunks will be allocated out of this and the 3040 // and read-write metaspace chunks will be allocated out of this and the
2937 // remainder is the misc code and data chunks. 3041 // remainder is the misc code and data chunks.
2938 cds_total = FileMapInfo::shared_spaces_size(); 3042 cds_total = FileMapInfo::shared_spaces_size();
3043 cds_total = align_size_up(cds_total, _reserve_alignment);
2939 _space_list = new VirtualSpaceList(cds_total/wordSize); 3044 _space_list = new VirtualSpaceList(cds_total/wordSize);
3045 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3046
3047 if (!_space_list->initialization_succeeded()) {
3048 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3049 }
2940 3050
2941 #ifdef _LP64 3051 #ifdef _LP64
3052 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3053 vm_exit_during_initialization("Unable to dump shared archive.",
3054 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3055 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3056 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3057 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3058 }
3059
2942 // Set the compressed klass pointer base so that decoding of these pointers works 3060 // Set the compressed klass pointer base so that decoding of these pointers works
2943 // properly when creating the shared archive. 3061 // properly when creating the shared archive.
2944 assert(UseCompressedOops && UseCompressedKlassPointers, 3062 assert(UseCompressedOops && UseCompressedClassPointers,
2945 "UseCompressedOops and UseCompressedKlassPointers must be set"); 3063 "UseCompressedOops and UseCompressedClassPointers must be set");
2946 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3064 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
2947 if (TraceMetavirtualspaceAllocation && Verbose) { 3065 if (TraceMetavirtualspaceAllocation && Verbose) {
2948 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3066 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
2949 _space_list->current_virtual_space()->bottom()); 3067 _space_list->current_virtual_space()->bottom());
2950 } 3068 }
2951 3069
2952 // Set the shift to zero.
2953 assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
2954 "CDS region is too large");
2955 Universe::set_narrow_klass_shift(0); 3070 Universe::set_narrow_klass_shift(0);
2956 #endif 3071 #endif
2957 3072
2958 } else { 3073 } else {
2959 // If using shared space, open the file that contains the shared space 3074 // If using shared space, open the file that contains the shared space
2968 // initialization fails, shared spaces [UseSharedSpaces] are 3083 // initialization fails, shared spaces [UseSharedSpaces] are
2969 // disabled and the file is closed. 3084 // disabled and the file is closed.
2970 // Map in spaces now also 3085 // Map in spaces now also
2971 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3086 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2972 FileMapInfo::set_current_info(mapinfo); 3087 FileMapInfo::set_current_info(mapinfo);
3088 cds_total = FileMapInfo::shared_spaces_size();
3089 cds_address = (address)mapinfo->region_base(0);
2973 } else { 3090 } else {
2974 assert(!mapinfo->is_open() && !UseSharedSpaces, 3091 assert(!mapinfo->is_open() && !UseSharedSpaces,
2975 "archive file not closed or shared spaces not disabled."); 3092 "archive file not closed or shared spaces not disabled.");
2976 } 3093 }
2977 cds_total = FileMapInfo::shared_spaces_size();
2978 cds_address = (address)mapinfo->region_base(0);
2979 } 3094 }
2980 3095
2981 #ifdef _LP64 3096 #ifdef _LP64
2982 // If UseCompressedKlassPointers is set then allocate the metaspace area 3097 // If UseCompressedClassPointers is set then allocate the metaspace area
2983 // above the heap and above the CDS area (if it exists). 3098 // above the heap and above the CDS area (if it exists).
2984 if (using_class_space()) { 3099 if (using_class_space()) {
2985 if (UseSharedSpaces) { 3100 if (UseSharedSpaces) {
2986 allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); 3101 char* cds_end = (char*)(cds_address + cds_total);
3102 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3103 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
2987 } else { 3104 } else {
2988 allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); 3105 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3106 allocate_metaspace_compressed_klass_ptrs(base, 0);
2989 } 3107 }
2990 } 3108 }
2991 #endif 3109 #endif
2992 3110
2993 // Initialize these before initializing the VirtualSpaceList 3111 // Initialize these before initializing the VirtualSpaceList
2995 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3113 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2996 // Make the first class chunk bigger than a medium chunk so it's not put 3114 // Make the first class chunk bigger than a medium chunk so it's not put
2997 // on the medium chunk list. The next chunk will be small and progress 3115 // on the medium chunk list. The next chunk will be small and progress
2998 // from there. This size was calculated by running -version. 3116 // from there. This size was calculated by running -version.
2999 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, 3117 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3000 (ClassMetaspaceSize/BytesPerWord)*2); 3118 (CompressedClassSpaceSize/BytesPerWord)*2);
3001 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3119 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3002 // Arbitrarily set the initial virtual space to a multiple 3120 // Arbitrarily set the initial virtual space to a multiple
3003 // of the boot class loader size. 3121 // of the boot class loader size.
3004 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); 3122 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3123 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3124
3005 // Initialize the list of virtual spaces. 3125 // Initialize the list of virtual spaces.
3006 _space_list = new VirtualSpaceList(word_size); 3126 _space_list = new VirtualSpaceList(word_size);
3007 } 3127 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3128
3129 if (!_space_list->initialization_succeeded()) {
3130 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3131 }
3132 }
3133
3134 MetaspaceGC::initialize();
3135 }
3136
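The dump-time guard in global_initialize() is plain arithmetic: the archive plus the compressed class space must stay below the 4G unscaled-encoding limit, because the archive is written with narrow_klass_shift == 0. In isolation (the sizes below are illustrative stand-ins):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    const uint64_t UnscaledClassSpaceMax = uint64_t(UINT32_MAX) + 1;  // 4G

    int main() {
      uint64_t cds_total = 300ULL * 1024 * 1024;                    // archive size stand-in
      uint64_t compressed_class_space_size = 1024ULL * 1024 * 1024; // class space stand-in
      if (cds_total + compressed_class_space_size > UnscaledClassSpaceMax) {
        fprintf(stderr, "Unable to dump shared archive: total %llu exceeds klass limit %llu\n",
                (unsigned long long)(cds_total + compressed_class_space_size),
                (unsigned long long)UnscaledClassSpaceMax);
        exit(1);
      }
      printf("archive + class space fit: %llu bytes\n",
             (unsigned long long)(cds_total + compressed_class_space_size));
    }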
3137 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3138 size_t chunk_word_size,
3139 size_t chunk_bunch) {
3140 // Get a chunk from the chunk freelist
3141 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3142 if (chunk != NULL) {
3143 return chunk;
3144 }
3145
3146 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3008 } 3147 }
3009 3148
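get_initialization_chunk() above encodes the allocation order used throughout this file: satisfy the request from the chunk freelist if possible, otherwise carve a new chunk out of virtual space. A toy stand-in (Chunk, the freelist, and get_new_chunk() are simplified types, not HotSpot's):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Chunk { size_t words; };

    static std::vector<Chunk*> free_list;           // ChunkManager stand-in

    static Chunk* freelist_allocate(size_t words) { // chunk_freelist_allocate() stand-in
      for (size_t i = 0; i < free_list.size(); i++) {
        if (free_list[i]->words == words) {
          Chunk* c = free_list[i];
          free_list.erase(free_list.begin() + static_cast<std::ptrdiff_t>(i));
          return c;
        }
      }
      return nullptr;
    }

    static Chunk* get_new_chunk(size_t words) {     // VirtualSpaceList stand-in
      return new Chunk{words};
    }

    static Chunk* get_initialization_chunk(size_t words) {
      Chunk* chunk = freelist_allocate(words);      // reuse a freed chunk if one fits
      if (chunk != nullptr) return chunk;
      return get_new_chunk(words);                  // otherwise carve a fresh one
    }

    int main() {
      free_list.push_back(new Chunk{512});
      printf("%zu\n", get_initialization_chunk(512)->words);  // served from the free list
      printf("%zu\n", get_initialization_chunk(512)->words);  // freshly carved
    }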
3010 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3149 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3011 3150
3012 assert(space_list() != NULL, 3151 assert(space_list() != NULL,
3013 "Metadata VirtualSpaceList has not been initialized"); 3152 "Metadata VirtualSpaceList has not been initialized");
3014 3153 assert(chunk_manager_metadata() != NULL,
3015 _vsm = new SpaceManager(NonClassType, lock, space_list()); 3154 "Metadata ChunkManager has not been initialized");
3155
3156 _vsm = new SpaceManager(NonClassType, lock);
3016 if (_vsm == NULL) { 3157 if (_vsm == NULL) {
3017 return; 3158 return;
3018 } 3159 }
3019 size_t word_size; 3160 size_t word_size;
3020 size_t class_word_size; 3161 size_t class_word_size;
3021 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3162 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3022 3163
3023 if (using_class_space()) { 3164 if (using_class_space()) {
3024 assert(class_space_list() != NULL, 3165 assert(class_space_list() != NULL,
3025 "Class VirtualSpaceList has not been initialized"); 3166 "Class VirtualSpaceList has not been initialized");
3167 assert(chunk_manager_class() != NULL,
3168 "Class ChunkManager has not been initialized");
3026 3169
3027 // Allocate SpaceManager for classes. 3170 // Allocate SpaceManager for classes.
3028 _class_vsm = new SpaceManager(ClassType, lock, class_space_list()); 3171 _class_vsm = new SpaceManager(ClassType, lock);
3029 if (_class_vsm == NULL) { 3172 if (_class_vsm == NULL) {
3030 return; 3173 return;
3031 } 3174 }
3032 } 3175 }
3033 3176
3034 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3177 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3035 3178
3036 // Allocate chunk for metadata objects 3179 // Allocate chunk for metadata objects
3037 Metachunk* new_chunk = 3180 Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3038 space_list()->get_initialization_chunk(word_size, 3181 word_size,
3039 vsm()->medium_chunk_bunch()); 3182 vsm()->medium_chunk_bunch());
3040 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3183 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3041 if (new_chunk != NULL) { 3184 if (new_chunk != NULL) {
3042 // Add to this manager's list of chunks in use and current_chunk(). 3185 // Add to this manager's list of chunks in use and current_chunk().
3043 vsm()->add_chunk(new_chunk, true); 3186 vsm()->add_chunk(new_chunk, true);
3044 } 3187 }
3045 3188
3046 // Allocate chunk for class metadata objects 3189 // Allocate chunk for class metadata objects
3047 if (using_class_space()) { 3190 if (using_class_space()) {
3048 Metachunk* class_chunk = 3191 Metachunk* class_chunk = get_initialization_chunk(ClassType,
3049 class_space_list()->get_initialization_chunk(class_word_size, 3192 class_word_size,
3050 class_vsm()->medium_chunk_bunch()); 3193 class_vsm()->medium_chunk_bunch());
3051 if (class_chunk != NULL) { 3194 if (class_chunk != NULL) {
3052 class_vsm()->add_chunk(class_chunk, true); 3195 class_vsm()->add_chunk(class_chunk, true);
3053 } 3196 }
3054 } 3197 }
3055 3198
3062 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3205 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3063 } 3206 }
3064 3207
3065 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3208 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3066 // DumpSharedSpaces doesn't use class metadata area (yet) 3209 // DumpSharedSpaces doesn't use class metadata area (yet)
3067 // Also, don't use class_vsm() unless UseCompressedKlassPointers is true. 3210 // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3068 if (mdtype == ClassType && using_class_space()) { 3211 if (is_class_space_allocation(mdtype)) {
3069 return class_vsm()->allocate(word_size); 3212 return class_vsm()->allocate(word_size);
3070 } else { 3213 } else {
3071 return vsm()->allocate(word_size); 3214 return vsm()->allocate(word_size);
3072 } 3215 }
3073 } 3216 }
3074 3217
3075 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3218 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3076 MetaWord* result; 3219 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3077 MetaspaceGC::set_expand_after_GC(true); 3220 assert(delta_bytes > 0, "Must be");
3078 size_t before_inc = MetaspaceGC::capacity_until_GC(); 3221
3079 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; 3222 size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3080 MetaspaceGC::inc_capacity_until_GC(delta_bytes); 3223 size_t before_inc = after_inc - delta_bytes;
3224
3081 if (PrintGCDetails && Verbose) { 3225 if (PrintGCDetails && Verbose) {
3082 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3226 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3083 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); 3227 " to " SIZE_FORMAT, before_inc, after_inc);
3084 } 3228 }
3085 3229
3086 result = allocate(word_size, mdtype); 3230 return allocate(word_size, mdtype);
3087
3088 return result;
3089 } 3231 }
3090 3232
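inc_capacity_until_GC() returns the post-increment value, so expand_and_allocate() recovers the old watermark as after_inc - delta_bytes for the log line. The same bookkeeping with std::atomic standing in for MetaspaceGC's counter:

    #include <atomic>
    #include <cstdio>

    static std::atomic<size_t> capacity_until_gc{20 * 1024 * 1024};

    static size_t inc_capacity_until_gc(size_t delta_bytes) {
      // fetch_add returns the old value; add delta to get the new one.
      return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
    }

    int main() {
      size_t delta  = 4 * 1024 * 1024;
      size_t after  = inc_capacity_until_gc(delta);
      size_t before = after - delta;
      printf("Increase capacity to GC from %zu to %zu\n", before, after);
    }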
3091 // Space allocated in the Metaspace. This may 3233 // Space allocated in the Metaspace. This may
3092 // be across several metadata virtual spaces. 3234 // be across several metadata virtual spaces.
3093 char* Metaspace::bottom() const { 3235 char* Metaspace::bottom() const {
3101 } else { 3243 } else {
3102 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3244 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3103 } 3245 }
3104 } 3246 }
3105 3247
3106 size_t Metaspace::free_words(MetadataType mdtype) const { 3248 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3107 if (mdtype == ClassType) { 3249 if (mdtype == ClassType) {
3108 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3250 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3109 } else { 3251 } else {
3110 return vsm()->sum_free_in_chunks_in_use(); 3252 return vsm()->sum_free_in_chunks_in_use();
3111 } 3253 }
3165 vsm()->deallocate(ptr, word_size); 3307 vsm()->deallocate(ptr, word_size);
3166 } 3308 }
3167 } 3309 }
3168 } 3310 }
3169 3311
3170 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3312
3313 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3171 bool read_only, MetaspaceObj::Type type, TRAPS) { 3314 bool read_only, MetaspaceObj::Type type, TRAPS) {
3172 if (HAS_PENDING_EXCEPTION) { 3315 if (HAS_PENDING_EXCEPTION) {
3173 assert(false, "Should not allocate with exception pending"); 3316 assert(false, "Should not allocate with exception pending");
3174 return NULL; // caller does a CHECK_NULL too 3317 return NULL; // caller does a CHECK_NULL too
3175 } 3318 }
3176 3319
3177 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3178
3179 // SSS: Should we align the allocations and make sure the sizes are aligned.
3180 MetaWord* result = NULL;
3181
3182 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3320 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3183 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3321 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3322
3184 // Allocate in metaspaces without taking out a lock, because it deadlocks 3323 // Allocate in metaspaces without taking out a lock, because it deadlocks
3185 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3324 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3186 // to revisit this for application class data sharing. 3325 // to revisit this for application class data sharing.
3187 if (DumpSharedSpaces) { 3326 if (DumpSharedSpaces) {
3188 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3327 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3189 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3328 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3190 result = space->allocate(word_size, NonClassType); 3329 MetaWord* result = space->allocate(word_size, NonClassType);
3191 if (result == NULL) { 3330 if (result == NULL) {
3192 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3331 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3193 } else { 3332 }
3194 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3333
3195 } 3334 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3196 return Metablock::initialize(result, word_size); 3335
3197 } 3336 // Zero initialize.
3198 3337 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3199 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3338
3339 return result;
3340 }
3341
3342 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3343
3344 // Try to allocate metadata.
3345 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3200 3346
3201 if (result == NULL) { 3347 if (result == NULL) {
3202 // Try to clean out some memory and retry. 3348 // Allocation failed.
3203 result = 3349 if (is_init_completed()) {
3204 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3350 // Only start a GC if the bootstrapping has completed.
3205 loader_data, word_size, mdtype); 3351
3206 3352 // Try to clean out some memory and retry.
3207 // If result is still null, we are out of memory. 3353 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3208 if (result == NULL) { 3354 loader_data, word_size, mdtype);
3209 if (Verbose && TraceMetadataChunkAllocation) { 3355 }
3210 gclog_or_tty->print_cr("Metaspace allocation failed for size " 3356 }
3211 SIZE_FORMAT, word_size); 3357
3212 if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); 3358 if (result == NULL) {
3213 MetaspaceAux::dump(gclog_or_tty); 3359 report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
3214 } 3360 }
3215 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3361
3216 const char* space_string = (mdtype == ClassType) ? "Class Metadata space" : 3362 // Zero initialize.
3217 "Metadata space"; 3363 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3218 report_java_out_of_memory(space_string); 3364
3219 3365 return result;
3220 if (JvmtiExport::should_post_resource_exhausted()) { 3366 }
3221 JvmtiExport::post_resource_exhausted( 3367
3222 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3368 size_t Metaspace::class_chunk_size(size_t word_size) {
3223 space_string); 3369 assert(using_class_space(), "Has to use class space");
3224 } 3370 return class_vsm()->calc_chunk_size(word_size);
3225 if (mdtype == ClassType) { 3371 }
3226 THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); 3372
3227 } else { 3373 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
3228 THROW_OOP_0(Universe::out_of_memory_error_metaspace()); 3374 // If result is still null, we are out of memory.
3229 } 3375 if (Verbose && TraceMetadataChunkAllocation) {
3230 } 3376 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3231 } 3377 SIZE_FORMAT, word_size);
3232 return Metablock::initialize(result, word_size); 3378 if (loader_data->metaspace_or_null() != NULL) {
3379 loader_data->dump(gclog_or_tty);
3380 }
3381 MetaspaceAux::dump(gclog_or_tty);
3382 }
3383
3384 bool out_of_compressed_class_space = false;
3385 if (is_class_space_allocation(mdtype)) {
3386 Metaspace* metaspace = loader_data->metaspace_non_null();
3387 out_of_compressed_class_space =
3388 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3389 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3390 CompressedClassSpaceSize;
3391 }
3392
3393 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3394 const char* space_string = out_of_compressed_class_space ?
3395 "Compressed class space" : "Metaspace";
3396
3397 report_java_out_of_memory(space_string);
3398
3399 if (JvmtiExport::should_post_resource_exhausted()) {
3400 JvmtiExport::post_resource_exhausted(
3401 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3402 space_string);
3403 }
3404
3405 if (!is_init_completed()) {
3406 vm_exit_during_initialization("OutOfMemoryError", space_string);
3407 }
3408
3409 if (out_of_compressed_class_space) {
3410 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3411 } else {
3412 THROW_OOP(Universe::out_of_memory_error_metaspace());
3413 }
3233 } 3414 }
3234 3415
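The slow path above, in miniature: try to allocate, and only once bootstrapping is complete run a GC and retry before reporting OOM (report_metadata_oome() then distinguishes "Compressed class space" from "Metaspace"). The allocator and collector below are hypothetical stand-ins:

    #include <cstddef>
    #include <cstdio>

    static bool init_completed = true;   // is_init_completed() stand-in
    static bool gc_ran = false;

    static int* try_allocate(size_t words) {
      // Fail the first attempt on purpose, to force the GC-and-retry path.
      return gc_ran ? new int[words] : nullptr;
    }

    static int* satisfy_failed_allocation(size_t words) {
      gc_ran = true;                     // "collect" metadata, then retry
      return try_allocate(words);
    }

    static int* allocate(size_t words) {
      int* result = try_allocate(words);
      if (result == nullptr && init_completed) {
        result = satisfy_failed_allocation(words);  // only GC after bootstrapping
      }
      if (result == nullptr) {
        printf("java.lang.OutOfMemoryError: Metaspace\n");
      }
      return result;
    }

    int main() { printf("allocated: %p\n", (void*)allocate(128)); }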
3235 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3416 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3236 assert(DumpSharedSpaces, "sanity"); 3417 assert(DumpSharedSpaces, "sanity");
3237 3418
3262 if (last_addr < top) { 3443 if (last_addr < top) {
3263 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3444 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3264 } 3445 }
3265 } 3446 }
3266 3447
3448 void Metaspace::purge(MetadataType mdtype) {
3449 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3450 }
3451
3267 void Metaspace::purge() { 3452 void Metaspace::purge() {
3268 MutexLockerEx cl(SpaceManager::expand_lock(), 3453 MutexLockerEx cl(SpaceManager::expand_lock(),
3269 Mutex::_no_safepoint_check_flag); 3454 Mutex::_no_safepoint_check_flag);
3270 space_list()->purge(); 3455 purge(NonClassType);
3271 if (using_class_space()) { 3456 if (using_class_space()) {
3272 class_space_list()->purge(); 3457 purge(ClassType);
3273 } 3458 }
3274 } 3459 }
3275 3460
3276 void Metaspace::print_on(outputStream* out) const { 3461 void Metaspace::print_on(outputStream* out) const {
3277 // Print both class virtual space counts and metaspace. 3462 // Print both class virtual space counts and metaspace.
3309 if (using_class_space()) { 3494 if (using_class_space()) {
3310 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); 3495 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3311 class_vsm()->dump(out); 3496 class_vsm()->dump(out);
3312 } 3497 }
3313 } 3498 }
3499
3500 /////////////// Unit tests ///////////////
3501
3502 #ifndef PRODUCT
3503
3504 class TestMetaspaceAuxTest : AllStatic {
3505 public:
3506 static void test_reserved() {
3507 size_t reserved = MetaspaceAux::reserved_bytes();
3508
3509 assert(reserved > 0, "assert");
3510
3511 size_t committed = MetaspaceAux::committed_bytes();
3512 assert(committed <= reserved, "assert");
3513
3514 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3515 assert(reserved_metadata > 0, "assert");
3516 assert(reserved_metadata <= reserved, "assert");
3517
3518 if (UseCompressedClassPointers) {
3519 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3520 assert(reserved_class > 0, "assert");
3521 assert(reserved_class < reserved, "assert");
3522 }
3523 }
3524
3525 static void test_committed() {
3526 size_t committed = MetaspaceAux::committed_bytes();
3527
3528 assert(committed > 0, "assert");
3529
3530 size_t reserved = MetaspaceAux::reserved_bytes();
3531 assert(committed <= reserved, "assert");
3532
3533 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3534 assert(committed_metadata > 0, "assert");
3535 assert(committed_metadata <= committed, "assert");
3536
3537 if (UseCompressedClassPointers) {
3538 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3539 assert(committed_class > 0, "assert");
3540 assert(committed_class < committed, "assert");
3541 }
3542 }
3543
3544 static void test_virtual_space_list_large_chunk() {
3545 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3546 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3547 // Use a size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
3548 // vm_allocation_granularity aligned on Windows.
3549 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3550 large_size += (os::vm_page_size()/BytesPerWord);
3551 vs_list->get_new_chunk(large_size, large_size, 0);
3552 }
3553
3554 static void test() {
3555 test_reserved();
3556 test_committed();
3557 test_virtual_space_list_large_chunk();
3558 }
3559 };
3560
3561 void TestMetaspaceAux_test() {
3562 TestMetaspaceAuxTest::test();
3563 }
3564
3565 class TestVirtualSpaceNodeTest {
3566 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3567 size_t& num_small_chunks,
3568 size_t& num_specialized_chunks) {
3569 num_medium_chunks = words_left / MediumChunk;
3570 words_left = words_left % MediumChunk;
3571
3572 num_small_chunks = words_left / SmallChunk;
3573 words_left = words_left % SmallChunk;
3574 // How many specialized chunks can we get? (See the worked example after this class.)
3575 num_specialized_chunks = words_left / SpecializedChunk;
3576 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3577 }
3578
3579 public:
3580 static void test() {
3581 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3582 const size_t vsn_test_size_words = MediumChunk * 4;
3583 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3584
3585 // The chunk sizes must be multiples of each other, or this will fail
3586 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3587 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3588
3589 { // No committed memory in VSN
3590 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3591 VirtualSpaceNode vsn(vsn_test_size_bytes);
3592 vsn.initialize();
3593 vsn.retire(&cm);
3594 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3595 }
3596
3597 { // All of VSN is committed, half is used by chunks
3598 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3599 VirtualSpaceNode vsn(vsn_test_size_bytes);
3600 vsn.initialize();
3601 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3602 vsn.get_chunk_vs(MediumChunk);
3603 vsn.get_chunk_vs(MediumChunk);
3604 vsn.retire(&cm);
3605 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3606 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3607 }
3608
3609 { // 4 pages of VSN are committed, some are used by chunks
3610 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3611 VirtualSpaceNode vsn(vsn_test_size_bytes);
3612 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3613 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3614 vsn.initialize();
3615 vsn.expand_by(page_chunks, page_chunks);
3616 vsn.get_chunk_vs(SmallChunk);
3617 vsn.get_chunk_vs(SpecializedChunk);
3618 vsn.retire(&cm);
3619
3620 // committed - used = words left to retire
3621 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3622
3623 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3624 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3625
3626 assert(num_medium_chunks == 0, "should not get any medium chunks");
3627 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3628 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3629 }
3630
3631 { // Half of VSN is committed, a humongous chunk is used
3632 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3633 VirtualSpaceNode vsn(vsn_test_size_bytes);
3634 vsn.initialize();
3635 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3636 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3637 vsn.retire(&cm);
3638
3639 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3640 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3641 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3642
3643 assert(num_medium_chunks == 0, "should not get any medium chunks");
3644 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3645 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3646 }
3647
3648 }
3649 };
3650
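chunk_up() in TestVirtualSpaceNodeTest above is pure integer division, greedily retiring leftover words into the largest chunks first. A worked standalone example using the humongous-chunk case from test(), with the chunk word sizes defined earlier in this file:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t SpecializedChunk = 128, SmallChunk = 512, MediumChunk = 8 * 1024;
      // Humongous case: 2 medium chunks committed, MediumChunk + SpecializedChunk used.
      size_t words_left = 2 * MediumChunk - (MediumChunk + SpecializedChunk);  // 8064 words
      size_t num_medium = words_left / MediumChunk;  words_left %= MediumChunk;       // 0
      size_t num_small  = words_left / SmallChunk;   words_left %= SmallChunk;        // 15
      size_t num_spec   = words_left / SpecializedChunk;                              // 3
      printf("medium=%zu small=%zu specialized=%zu\n", num_medium, num_small, num_spec);
    }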
3651 void TestVirtualSpaceNode_test() {
3652 TestVirtualSpaceNodeTest::test();
3653 }
3654
3655 #endif