comparison src/share/vm/memory/allocation.cpp @ 6197:d2a62e0f25eb
6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author | zgu |
---|---|
date | Thu, 28 Jun 2012 17:03:16 -0400 |
parents | f08d439fab8c |
children | da91efe96a93 |
6174:74533f63b116 | 6197:d2a62e0f25eb |
---|---|
24 | 24 |
25 #include "precompiled.hpp" | 25 #include "precompiled.hpp" |
26 #include "memory/allocation.hpp" | 26 #include "memory/allocation.hpp" |
27 #include "memory/allocation.inline.hpp" | 27 #include "memory/allocation.inline.hpp" |
28 #include "memory/resourceArea.hpp" | 28 #include "memory/resourceArea.hpp" |
29 #include "runtime/atomic.hpp" | |
29 #include "runtime/os.hpp" | 30 #include "runtime/os.hpp" |
30 #include "runtime/task.hpp" | 31 #include "runtime/task.hpp" |
31 #include "runtime/threadCritical.hpp" | 32 #include "runtime/threadCritical.hpp" |
33 #include "services/memTracker.hpp" | |
32 #include "utilities/ostream.hpp" | 34 #include "utilities/ostream.hpp" |
| 35 |
33 #ifdef TARGET_OS_FAMILY_linux | 36 #ifdef TARGET_OS_FAMILY_linux |
34 # include "os_linux.inline.hpp" | 37 # include "os_linux.inline.hpp" |
35 #endif | 38 #endif |
36 #ifdef TARGET_OS_FAMILY_solaris | 39 #ifdef TARGET_OS_FAMILY_solaris |
37 # include "os_solaris.inline.hpp" | 40 # include "os_solaris.inline.hpp" |
40 # include "os_windows.inline.hpp" | 43 # include "os_windows.inline.hpp" |
41 #endif | 44 #endif |
42 #ifdef TARGET_OS_FAMILY_bsd | 45 #ifdef TARGET_OS_FAMILY_bsd |
43 # include "os_bsd.inline.hpp" | 46 # include "os_bsd.inline.hpp" |
44 #endif | 47 #endif |
45 | |
46 void* CHeapObj::operator new(size_t size){ | |
47 return (void *) AllocateHeap(size, "CHeapObj-new"); | |
48 } | |
49 | |
50 void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) { | |
51 char* p = (char*) os::malloc(size); | |
52 #ifdef ASSERT | |
53 if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); | |
54 #endif | |
55 return p; | |
56 } | |
57 | |
58 void CHeapObj::operator delete(void* p){ | |
59 FreeHeap(p); | |
60 } | |
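
The block deleted on the left is the old untyped CHeapObj allocation path. In this changeset CHeapObj becomes a template parameterized by a memory type (see `ChunkPool: public CHeapObj<mtInternal>` further down), so every C-heap object is attributed to an NMT category through its static type. A standalone sketch of the idea, with invented names (`CHeapObjModel`, `record_malloc`) rather than HotSpot's real declarations:

```cpp
// Standalone model (not HotSpot code) of a memory-type-tagged base
// class: the tag is a template parameter, so allocation and free of
// any subclass are attributed to one category at compile time.
// MEMFLAGS values and record_* hooks here are illustrative.
#include <cstddef>
#include <cstdlib>

enum MEMFLAGS { mtNone, mtInternal, mtChunk, mtThread };

template <MEMFLAGS F>
class CHeapObjModel {
 public:
  void* operator new(size_t size) {
    void* p = std::malloc(size);
    record_malloc(p, size, F);   // hypothetical NMT-style hook
    return p;
  }
  void operator delete(void* p) {
    record_free(p, F);           // hypothetical NMT-style hook
    std::free(p);
  }
 private:
  static void record_malloc(void*, size_t, MEMFLAGS) { /* bookkeeping */ }
  static void record_free(void*, MEMFLAGS) { /* bookkeeping */ }
};

// Usage: the tag travels with the type, not the call site.
class Cache : public CHeapObjModel<mtInternal> { int dummy; };
```
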
61 | 48 |
62 void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; | 49 void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; |
63 void StackObj::operator delete(void* p) { ShouldNotCallThis(); }; | 50 void StackObj::operator delete(void* p) { ShouldNotCallThis(); }; |
64 void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; | 51 void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; |
65 void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }; | 52 void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }; |
66 | 53 |
67 void* ResourceObj::operator new(size_t size, allocation_type type) { | 54 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) { |
68 address res; | 55 address res; |
69 switch (type) { | 56 switch (type) { |
70 case C_HEAP: | 57 case C_HEAP: |
71 res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ"); | 58 res = (address)AllocateHeap(size, flags, CALLER_PC); |
72 DEBUG_ONLY(set_allocation_type(res, C_HEAP);) | 59 DEBUG_ONLY(set_allocation_type(res, C_HEAP);) |
73 break; | 60 break; |
74 case RESOURCE_AREA: | 61 case RESOURCE_AREA: |
75 // new(size) sets allocation type RESOURCE_AREA. | 62 // new(size) sets allocation type RESOURCE_AREA. |
76 res = (address)operator new(size); | 63 res = (address)operator new(size); |
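
ResourceObj::operator new now takes a MEMFLAGS argument and forwards it to AllocateHeap on the C_HEAP path, along with CALLER_PC, which presumably records the allocation site for NMT; the RESOURCE_AREA path is unchanged. A toy model of the two-way dispatch, with every name (`ObjModel`, `ResourceAreaModel`, `current_area`) invented for illustration:

```cpp
// Toy model of ResourceObj's dual placement: C heap (where the memory
// type matters for tracking) versus a scoped resource area. All names
// are invented for illustration.
#include <cstddef>
#include <cstdlib>

enum AllocType { C_HEAP, RESOURCE_AREA };
enum MemTag    { tagInternal, tagCompiler };

struct ResourceAreaModel {              // crude bump allocator
  char   buf[4096];
  size_t used = 0;
  void* alloc(size_t n) { void* p = buf + used; used += n; return p; }
};

static ResourceAreaModel current_area;  // stand-in for the thread's area

struct ObjModel {
  void* operator new(size_t size, AllocType type, MemTag tag) {
    if (type == C_HEAP) {
      (void)tag;                        // real code forwards it to AllocateHeap
      return std::malloc(size);
    }
    return current_area.alloc(size);    // RESOURCE_AREA path
  }
  void operator delete(void* p, AllocType t, MemTag) {  // matching placement pair
    if (t == C_HEAP) std::free(p);
  }
};

// usage: ObjModel* o = new (C_HEAP, tagCompiler) ObjModel();
```
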
182 //-------------------------------------------------------------------------------------- | 169 //-------------------------------------------------------------------------------------- |
183 // ChunkPool implementation | 170 // ChunkPool implementation |
184 | 171 |
185 // MT-safe pool of chunks to reduce malloc/free thrashing | 172 // MT-safe pool of chunks to reduce malloc/free thrashing |
186 // NB: not using Mutex because pools are used before Threads are initialized | 173 // NB: not using Mutex because pools are used before Threads are initialized |
187 class ChunkPool { | 174 class ChunkPool: public CHeapObj<mtInternal> { |
188 Chunk* _first; // first cached Chunk; its first word points to next chunk | 175 Chunk* _first; // first cached Chunk; its first word points to next chunk |
189 size_t _num_chunks; // number of unused chunks in pool | 176 size_t _num_chunks; // number of unused chunks in pool |
190 size_t _num_used; // number of chunks currently checked out | 177 size_t _num_used; // number of chunks currently checked out |
191 const size_t _size; // size of each chunk (must be uniform) | 178 const size_t _size; // size of each chunk (must be uniform) |
192 | 179 |
208 public: | 195 public: |
209 // All chunks in a ChunkPool has the same size | 196 // All chunks in a ChunkPool has the same size |
210 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } | 197 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } |
211 | 198 |
212 // Allocate a new chunk from the pool (might expand the pool) | 199 // Allocate a new chunk from the pool (might expand the pool) |
213 void* allocate(size_t bytes) { | 200 _NOINLINE_ void* allocate(size_t bytes) { |
214 assert(bytes == _size, "bad size"); | 201 assert(bytes == _size, "bad size"); |
215 void* p = NULL; | 202 void* p = NULL; |
| 203 // No VM lock can be taken inside ThreadCritical lock, so os::malloc |
| 204 // should be done outside ThreadCritical lock due to NMT |
216 { ThreadCritical tc; | 205 { ThreadCritical tc; |
217 _num_used++; | 206 _num_used++; |
218 p = get_first(); | 207 p = get_first(); |
219 if (p == NULL) p = os::malloc(bytes); | 208 } |
220 } | 209 if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); |
221 if (p == NULL) | 210 if (p == NULL) |
222 vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); | 211 vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); |
223 | 212 |
224 return p; | 213 return p; |
225 } | 214 } |
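
The restructured allocate() is one of two deadlock-avoidance changes in this file. As the new comment says, os::malloc must not be called while ThreadCritical is held once NMT is doing its own bookkeeping, so the fallback allocation moves outside the lock (CURRENT_PC presumably captures the call site for attribution). A minimal standalone model of the pattern, with std::mutex standing in for ThreadCritical:

```cpp
// Minimal model of the reordering above: the free list is probed
// under the lock, but the fallback malloc runs outside it, so the
// allocator's own tracking never executes inside the critical section.
#include <cstddef>
#include <cstdlib>
#include <mutex>

struct PoolModel {
  std::mutex lock;
  void*      free_list = nullptr;   // first word of a chunk points to the next

  void* allocate(size_t bytes) {
    void* p = nullptr;
    {
      std::lock_guard<std::mutex> g(lock);
      if (free_list != nullptr) {                 // pop a cached chunk
        p = free_list;
        free_list = *static_cast<void**>(p);
      }
    }
    if (p == nullptr) p = std::malloc(bytes);     // fallback, outside the lock
    return p;
  }
};
```
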
236 _num_chunks++; | 225 _num_chunks++; |
237 } | 226 } |
238 | 227 |
239 // Prune the pool | 228 // Prune the pool |
240 void free_all_but(size_t n) { | 229 void free_all_but(size_t n) { |
| 230 Chunk* cur = NULL; |
| 231 Chunk* next; |
| 232 { |
241 // if we have more than n chunks, free all of them | 233 // if we have more than n chunks, free all of them |
242 ThreadCritical tc; | 234 ThreadCritical tc; |
243 if (_num_chunks > n) { | 235 if (_num_chunks > n) { |
244 // free chunks at end of queue, for better locality | 236 // free chunks at end of queue, for better locality |
245 Chunk* cur = _first; | 237 cur = _first; |
246 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); | 238 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); |
247 | 239 |
248 if (cur != NULL) { | 240 if (cur != NULL) { |
249 Chunk* next = cur->next(); | 241 next = cur->next(); |
250 cur->set_next(NULL); | 242 cur->set_next(NULL); |
251 cur = next; | 243 cur = next; |
252 | 244 |
253 // Free all remaining chunks | 245 _num_chunks = n; |
| 246 } |
| 247 } |
| 248 } |
| 249 |
| 250 // Free all remaining chunks, outside of ThreadCritical |
| 251 // to avoid deadlock with NMT |
254 while(cur != NULL) { | 252 while(cur != NULL) { |
255 next = cur->next(); | 253 next = cur->next(); |
256 os::free(cur); | 254 os::free(cur, mtChunk); |
257 _num_chunks--; | |
258 cur = next; | 255 cur = next; |
259 } | 256 } |
260 } | 257 } |
261 } | |
262 } | |
263 | 258 |
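
free_all_but() gets the mirror-image treatment: surplus chunks are unlinked while ThreadCritical is held, and the os::free calls happen only after the lock is released, again keeping NMT's bookkeeping out of the critical section. A self-contained model of the detach-then-free pattern (assuming n >= 1, as the original loop effectively does):

```cpp
// Self-contained model of the detach-then-free pattern: the list is
// trimmed under the lock, the detached tail is freed after the lock
// is dropped.
#include <cstddef>
#include <cstdlib>
#include <mutex>

struct Node { Node* next; };

struct PoolPruneModel {
  std::mutex lock;
  Node*      first = nullptr;
  size_t     num_chunks = 0;

  void free_all_but(size_t n) {       // assumes n >= 1
    Node* doomed = nullptr;
    {
      std::lock_guard<std::mutex> g(lock);
      if (num_chunks > n && first != nullptr) {
        Node* cur = first;            // walk to the n-th retained node
        for (size_t i = 0; i + 1 < n && cur->next != nullptr; i++) cur = cur->next;
        doomed = cur->next;           // detach everything past it
        cur->next = nullptr;
        num_chunks = n;
      }
    }
    while (doomed != nullptr) {       // free outside the critical section
      Node* next = doomed->next;
      std::free(doomed);
      doomed = next;
    }
  }
};
```
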
264 // Accessors to preallocated pool's | 259 // Accessors to preallocated pool's |
265 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } | 260 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } |
266 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } | 261 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } |
267 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } | 262 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } |
321 switch (length) { | 316 switch (length) { |
322 case Chunk::size: return ChunkPool::large_pool()->allocate(bytes); | 317 case Chunk::size: return ChunkPool::large_pool()->allocate(bytes); |
323 case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); | 318 case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); |
324 case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); | 319 case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); |
325 default: { | 320 default: { |
326 void *p = os::malloc(bytes); | 321 void *p = os::malloc(bytes, mtChunk, CALLER_PC); |
327 if (p == NULL) | 322 if (p == NULL) |
328 vm_exit_out_of_memory(bytes, "Chunk::new"); | 323 vm_exit_out_of_memory(bytes, "Chunk::new"); |
329 return p; | 324 return p; |
330 } | 325 } |
331 } | 326 } |
335 Chunk* c = (Chunk*)p; | 330 Chunk* c = (Chunk*)p; |
336 switch (c->length()) { | 331 switch (c->length()) { |
337 case Chunk::size: ChunkPool::large_pool()->free(c); break; | 332 case Chunk::size: ChunkPool::large_pool()->free(c); break; |
338 case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; | 333 case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; |
339 case Chunk::init_size: ChunkPool::small_pool()->free(c); break; | 334 case Chunk::init_size: ChunkPool::small_pool()->free(c); break; |
340 default: os::free(c); | 335 default: os::free(c, mtChunk); |
341 } | 336 } |
342 } | 337 } |
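
Chunk's class-level operator new and delete dispatch on the requested length: the three standard chunk sizes go to the fixed-size pools, everything else straight to os::malloc, now tagged mtChunk so arena chunks show up as their own NMT category. A schematic of the size-class dispatch, with illustrative sizes rather than HotSpot's constants:

```cpp
// Schematic of the size-class dispatch: exact matches for the pooled
// sizes reuse a uniform-size pool, odd sizes fall back to plain
// malloc. The sizes and signatures are illustrative only.
#include <cstddef>
#include <cstdlib>

enum { small_size = 256, medium_size = 4096, large_size = 32768 };

void* chunk_allocate_model(size_t length, void* (*pool_alloc)(size_t)) {
  switch (length) {
    case small_size:
    case medium_size:
    case large_size: return pool_alloc(length);    // uniform-size pool
    default:         return std::malloc(length);   // tagged mtChunk in the real code
  }
}
```
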
343 | 338 |
344 Chunk::Chunk(size_t length) : _len(length) { | 339 Chunk::Chunk(size_t length) : _len(length) { |
345 _next = NULL; // Chain on the linked list | 340 _next = NULL; // Chain on the linked list |
372 ChunkPoolCleaner* cleaner = new ChunkPoolCleaner(); | 367 ChunkPoolCleaner* cleaner = new ChunkPoolCleaner(); |
373 cleaner->enroll(); | 368 cleaner->enroll(); |
374 } | 369 } |
375 | 370 |
376 //------------------------------Arena------------------------------------------ | 371 //------------------------------Arena------------------------------------------ |
| 372 NOT_PRODUCT(volatile jint Arena::_instance_count = 0;) |
377 | 373 |
378 Arena::Arena(size_t init_size) { | 374 Arena::Arena(size_t init_size) { |
379 size_t round_size = (sizeof (char *)) - 1; | 375 size_t round_size = (sizeof (char *)) - 1; |
380 init_size = (init_size+round_size) & ~round_size; | 376 init_size = (init_size+round_size) & ~round_size; |
381 _first = _chunk = new (init_size) Chunk(init_size); | 377 _first = _chunk = new (init_size) Chunk(init_size); |
382 _hwm = _chunk->bottom(); // Save the cached hwm, max | 378 _hwm = _chunk->bottom(); // Save the cached hwm, max |
383 _max = _chunk->top(); | 379 _max = _chunk->top(); |
384 set_size_in_bytes(init_size); | 380 set_size_in_bytes(init_size); |
| 381 NOT_PRODUCT(Atomic::inc(&_instance_count);) |
385 } | 382 } |
386 | 383 |
387 Arena::Arena() { | 384 Arena::Arena() { |
388 _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size); | 385 _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size); |
389 _hwm = _chunk->bottom(); // Save the cached hwm, max | 386 _hwm = _chunk->bottom(); // Save the cached hwm, max |
390 _max = _chunk->top(); | 387 _max = _chunk->top(); |
391 set_size_in_bytes(Chunk::init_size); | 388 set_size_in_bytes(Chunk::init_size); |
| 389 NOT_PRODUCT(Atomic::inc(&_instance_count);) |
392 } | 390 } |
393 | 391 |
394 Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { | 392 Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { |
395 set_size_in_bytes(a->size_in_bytes()); | 393 set_size_in_bytes(a->size_in_bytes()); |
396 } | 394 NOT_PRODUCT(Atomic::inc(&_instance_count);) |
| 395 } |
| 396 |
397 | 397 |
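
All three Arena constructors now bump a debug-only instance counter, and the destructor below decrements it; that counter is why runtime/atomic.hpp joins the includes. NOT_PRODUCT compiles the statement out of product builds. A standalone sketch of the same census idiom, modeling NOT_PRODUCT with an NDEBUG guard:

```cpp
// Sketch of the debug-only instance census: constructors and the
// destructor adjust an atomic counter, and the updates are compiled
// out of release builds (NDEBUG modeled in place of NOT_PRODUCT).
#include <atomic>

#ifdef NDEBUG
  #define DEBUG_ONLY_STMT(x)
#else
  #define DEBUG_ONLY_STMT(x) x
#endif

class ArenaModel {
  static std::atomic<int> _instance_count;
 public:
  ArenaModel()  { DEBUG_ONLY_STMT(_instance_count.fetch_add(1);) }
  ~ArenaModel() { DEBUG_ONLY_STMT(_instance_count.fetch_sub(1);) }
  static int instances() { return _instance_count.load(); }
};
std::atomic<int> ArenaModel::_instance_count{0};
```
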
398 Arena *Arena::move_contents(Arena *copy) { | 398 Arena *Arena::move_contents(Arena *copy) { |
399 copy->destruct_contents(); | 399 copy->destruct_contents(); |
400 copy->_chunk = _chunk; | 400 copy->_chunk = _chunk; |
401 copy->_hwm = _hwm; | 401 copy->_hwm = _hwm; |
407 return copy; // Return Arena with contents | 407 return copy; // Return Arena with contents |
408 } | 408 } |
409 | 409 |
410 Arena::~Arena() { | 410 Arena::~Arena() { |
411 destruct_contents(); | 411 destruct_contents(); |
| 412 NOT_PRODUCT(Atomic::dec(&_instance_count);) |
| 413 } |
| 414 |
| 415 void* Arena::operator new(size_t size) { |
| 416 assert(false, "Use dynamic memory type binding"); |
| 417 return NULL; |
| 418 } |
| 419 |
| 420 void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) { |
| 421 assert(false, "Use dynamic memory type binding"); |
| 422 return NULL; |
| 423 } |
| 424 |
| 425 // dynamic memory type binding |
| 426 void* Arena::operator new(size_t size, MEMFLAGS flags) { |
| 427 #ifdef ASSERT |
| 428 void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); |
| 429 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); |
| 430 return p; |
| 431 #else |
| 432 return (void *) AllocateHeap(size, flags|otArena, CALLER_PC); |
| 433 #endif |
| 434 } |
| 435 |
| 436 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) { |
| 437 #ifdef ASSERT |
| 438 void* p = os::malloc(size, flags|otArena, CALLER_PC); |
| 439 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); |
| 440 return p; |
| 441 #else |
| 442 return os::malloc(size, flags|otArena, CALLER_PC); |
| 443 #endif |
| 444 } |
| 445 |
| 446 void Arena::operator delete(void* p) { |
| 447 FreeHeap(p); |
412 } | 448 } |
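
The new Arena operator new overloads implement what the asserts call dynamic memory type binding: plain `new Arena` is poisoned, and callers must name a MEMFLAGS value at the new-expression, which the real code combines with otArena and passes to AllocateHeap or os::malloc. A minimal model of the poisoned-default pattern (enum values and class names invented):

```cpp
// Model of the poisoned-default pattern: the untyped operator new
// asserts so every allocation site must name a memory type. Names and
// enum values are invented.
#include <cassert>
#include <cstddef>
#include <cstdlib>

enum MemType { mtCompilerModel, mtThreadModel };

class ArenaBindModel {
 public:
  void* operator new(size_t size) {
    assert(false && "use new (MemType) ArenaBindModel");
    return std::malloc(size);          // not reached in debug builds
  }
  void* operator new(size_t size, MemType /*type*/) {
    return std::malloc(size);          // real code: AllocateHeap(size, type|otArena, pc)
  }
  void operator delete(void* p) { std::free(p); }
  void operator delete(void* p, MemType) { std::free(p); }  // matching placement delete
};

// usage: ArenaBindModel* a = new (mtCompilerModel) ArenaBindModel();
```
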
413 | 449 |
414 // Destroy this arenas contents and reset to empty | 450 // Destroy this arenas contents and reset to empty |
415 void Arena::destruct_contents() { | 451 void Arena::destruct_contents() { |
416 if (UseMallocOnly && _first != NULL) { | 452 if (UseMallocOnly && _first != NULL) { |
419 } | 455 } |
420 _first->chop(); | 456 _first->chop(); |
421 reset(); | 457 reset(); |
422 } | 458 } |
423 | 459 |
| 460 // This is high traffic method, but many calls actually don't |
| 461 // change the size |
| 462 void Arena::set_size_in_bytes(size_t size) { |
| 463 if (_size_in_bytes != size) { |
| 464 _size_in_bytes = size; |
| 465 MemTracker::record_arena_size((address)this, size); |
| 466 } |
| 467 } |
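
The out-of-line set_size_in_bytes() guards the call into MemTracker: as its comment notes, the method is hot but most calls do not change the size, so the tracker is notified only on an actual change. Sketched standalone, with `record_arena_size_model` standing in for MemTracker::record_arena_size:

```cpp
// Standalone sketch of the hot-path guard: most calls pass the size
// the arena already has, so the tracker callback fires only when the
// value actually changes.
#include <cstddef>

static void record_arena_size_model(void* /*arena*/, size_t /*size*/) {
  // would forward to the native memory tracker
}

struct ArenaSizeModel {
  size_t _size_in_bytes = 0;
  void set_size_in_bytes(size_t size) {
    if (_size_in_bytes != size) {   // skip the common no-change case
      _size_in_bytes = size;
      record_arena_size_model(this, size);
    }
  }
};
```
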
424 | 468 |
425 // Total of all Chunks in arena | 469 // Total of all Chunks in arena |
426 size_t Arena::used() const { | 470 size_t Arena::used() const { |
427 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk | 471 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk |
428 register Chunk *k = _first; | 472 register Chunk *k = _first; |
446 _chunk = new (len) Chunk(len); | 490 _chunk = new (len) Chunk(len); |
447 | 491 |
448 if (_chunk == NULL) { | 492 if (_chunk == NULL) { |
449 signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); | 493 signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); |
450 } | 494 } |
451 | |
452 if (k) k->set_next(_chunk); // Append new chunk to end of linked list | 495 if (k) k->set_next(_chunk); // Append new chunk to end of linked list |
453 else _first = _chunk; | 496 else _first = _chunk; |
454 _hwm = _chunk->bottom(); // Save the cached hwm, max | 497 _hwm = _chunk->bottom(); // Save the cached hwm, max |
455 _max = _chunk->top(); | 498 _max = _chunk->top(); |
456 set_size_in_bytes(size_in_bytes() + len); | 499 set_size_in_bytes(size_in_bytes() + len); |
536 #ifdef ASSERT | 579 #ifdef ASSERT |
537 void* Arena::malloc(size_t size) { | 580 void* Arena::malloc(size_t size) { |
538 assert(UseMallocOnly, "shouldn't call"); | 581 assert(UseMallocOnly, "shouldn't call"); |
539 // use malloc, but save pointer in res. area for later freeing | 582 // use malloc, but save pointer in res. area for later freeing |
540 char** save = (char**)internal_malloc_4(sizeof(char*)); | 583 char** save = (char**)internal_malloc_4(sizeof(char*)); |
541 return (*save = (char*)os::malloc(size)); | 584 return (*save = (char*)os::malloc(size, mtChunk)); |
542 } | 585 } |
543 | 586 |
544 // for debugging with UseMallocOnly | 587 // for debugging with UseMallocOnly |
545 void* Arena::internal_malloc_4(size_t x) { | 588 void* Arena::internal_malloc_4(size_t x) { |
546 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); | 589 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); |