comparison src/share/vm/memory/allocation.cpp @ 11009:f75faf51e8c4

7158805: Better rewriting of nested subroutine calls
Reviewed-by: mschoene, coleenp
author:   hseigel
date:     Thu, 07 Mar 2013 11:49:38 -0500
parents:  59c790074993
children: 4b52137b07c9
comparing 10121:6c560f9ebb3e (before) with 11009:f75faf51e8c4 (after)
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -246,23 +246,23 @@
  public:
   // All chunks in a ChunkPool have the same size
   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 
   // Allocate a new chunk from the pool (might expand the pool)
-  _NOINLINE_ void* allocate(size_t bytes) {
+  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     // No VM lock can be taken inside ThreadCritical lock, so os::malloc
     // should be done outside ThreadCritical lock due to NMT
     { ThreadCritical tc;
       _num_used++;
       p = get_first();
     }
     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
-    if (p == NULL)
-      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
-
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
+    }
     return p;
   }
 
   // Return a chunk to the pool
   void free(Chunk* chunk) {
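
The hunk above threads an AllocFailType through ChunkPool::allocate() so that a failed os::malloc() only brings the VM down when the caller asked for AllocFailStrategy::EXIT_OOM. A minimal, self-contained sketch of that idiom in plain C++ (not HotSpot code; the RETURN_NULL value mirrors AllocFailStrategy's assumed second enum member, which does not appear in this hunk):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Stand-in for AllocFailType: the caller decides what an OOM means.
    enum AllocFailEnum { EXIT_OOM, RETURN_NULL };

    // Try the allocation first; only abort if the caller chose EXIT_OOM.
    void* allocate_or_die(size_t bytes, AllocFailEnum mode) {
      void* p = std::malloc(bytes);               // stands in for os::malloc
      if (p == NULL && mode == EXIT_OOM) {
        std::fprintf(stderr, "out of memory\n");  // stands in for vm_exit_out_of_memory
        std::exit(1);
      }
      return p;                                   // may be NULL when mode == RETURN_NULL
    }

Attempting the allocation before consulting the failure mode keeps the common success path identical in both modes.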
@@ -355,25 +355,26 @@
 };
 
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   // requested_size is equal to sizeof(Chunk), but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to the expected arena alignment.
   // Expect requested_size, but if sizeof(Chunk) isn't the properly aligned size we must align it.
   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
   size_t bytes = ARENA_ALIGN(requested_size) + length;
   switch (length) {
-   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
-   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
-   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
    default: {
-     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
-     if (p == NULL)
-       vm_exit_out_of_memory(bytes, "Chunk::new");
+     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+       vm_exit_out_of_memory(bytes, "Chunk::new");
+     }
      return p;
    }
   }
 }
 
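With the extra placement parameter on Chunk::operator new, the failure policy is chosen at each allocation site. A usage sketch inside HotSpot sources (Chunk and AllocFailStrategy come from memory/allocation.hpp; the RETURN_NULL value is assumed here, since only EXIT_OOM is visible in this comparison):

    #include "memory/allocation.hpp"

    // Old behavior, now spelled explicitly: abort the VM if the chunk cannot be allocated.
    Chunk* c1 = new (AllocFailStrategy::EXIT_OOM, Chunk::size) Chunk(Chunk::size);

    // New option: get NULL back and let the caller recover.
    Chunk* c2 = new (AllocFailStrategy::RETURN_NULL, Chunk::size) Chunk(Chunk::size);
    if (c2 == NULL) {
      // e.g. abandon the current task instead of exiting the VM
    }

Because the allocation function may return NULL, the new-expression then yields NULL without running the Chunk constructor, which is exactly how Arena::grow() consumes it in the last hunk below.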
@@ -423,19 +424,19 @@
 NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
-  _first = _chunk = new (init_size) Chunk(init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
 Arena::Arena() {
-  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
@@ -538,16 +539,13 @@
 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   // Get minimal required size.  Either real big, or even bigger for giant objs
   size_t len = MAX2(x, (size_t) Chunk::size);
 
   Chunk *k = _chunk;            // Get filled-up chunk address
-  _chunk = new (len) Chunk(len);
+  _chunk = new (alloc_failmode, len) Chunk(len);
 
   if (_chunk == NULL) {
-    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
-      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
-    }
     return NULL;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
   else _first = _chunk;
   _hwm  = _chunk->bottom();     // Save the cached hwm, max
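
After this change Arena::grow() no longer reports the out-of-memory condition itself: the new-expression either exits the VM (EXIT_OOM) or yields NULL (RETURN_NULL), and grow() merely propagates the NULL. A caller-side sketch of the resulting contract (it assumes an Arena::Amalloc overload that forwards an AllocFailType down to grow(); that declaration is not part of this comparison):

    #include "memory/allocation.hpp"

    // Given some existing arena (how it was created is irrelevant here):
    void fill_buffer(Arena* arena) {
      // Assumed overload: Amalloc taking an AllocFailType and returning NULL on OOM.
      void* buf = arena->Amalloc(64 * 1024, AllocFailStrategy::RETURN_NULL);
      if (buf == NULL) {
        // the arena could not grow; fall back instead of exiting the VM
        return;
      }
      // ... use buf ...
    }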