comparison: src/share/vm/memory/allocation.cpp @ 6872:7b5885dadbdc
            (comparing 6871:045cb62046a7 with 6872:7b5885dadbdc)

8000617: It should be possible to allocate memory without the VM dying.
Reviewed-by: coleenp, kamg

author:   nloodin
date:     Wed, 17 Oct 2012 17:36:48 +0200
parents:  da91efe96a93
children: 716c64bda5ba
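This changeset threads an allocation-failure policy through HotSpot's internal allocators so a caller can receive NULL on exhaustion instead of the VM terminating through vm_exit_out_of_memory(). Below is a minimal standalone sketch of that pattern; the AllocFailStrategy::EXIT_OOM / RETURN_NULL names are taken from the diff itself, while the allocate() helper and the vm_exit_out_of_memory() stub are hypothetical scaffolding added so the example compiles on its own.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Failure policy, mirroring the names used in the diff below.
class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// Stand-in for HotSpot's vm_exit_out_of_memory(): report and abort.
static void vm_exit_out_of_memory(size_t sz, const char* whence) {
  fprintf(stderr, "out of memory: %lu bytes in %s\n", (unsigned long)sz, whence);
  exit(1);
}

// Hypothetical allocator: dies on failure by default (the old behavior),
// or returns NULL when the caller opts in to RETURN_NULL.
static void* allocate(size_t sz, AllocFailType mode = AllocFailStrategy::EXIT_OOM) {
  void* p = malloc(sz);
  if (p == NULL && mode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(sz, "allocate");
  }
  return p;  // NULL is a legitimate result only under RETURN_NULL
}

int main() {
  void* p = allocate(64, AllocFailStrategy::RETURN_NULL);
  if (p == NULL) return 1;  // caller recovers instead of the VM dying
  free(p);
  return 0;
}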
--- a/src/share/vm/memory/allocation.cpp
+++ b/src/share/vm/memory/allocation.cpp
@@ -90,10 +90,30 @@
     ShouldNotReachHere();
   }
   return res;
 }
 
+void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
+    allocation_type type, MEMFLAGS flags) {
+  // should only call this with std::nothrow, use other operator new() otherwise
+  address res;
+  switch (type) {
+   case C_HEAP:
+    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
+    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
+    break;
+   case RESOURCE_AREA:
+    // new(size) sets allocation type RESOURCE_AREA.
+    res = (address)operator new(size, std::nothrow);
+    break;
+   default:
+    ShouldNotReachHere();
+  }
+  return res;
+}
+
+
 void ResourceObj::operator delete(void* p) {
   assert(((ResourceObj *)p)->allocated_on_C_heap(),
          "delete only allowed for C_HEAP objects");
   DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
   FreeHeap(p);
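The hunk above adds a std::nothrow overload of ResourceObj::operator new that forwards AllocFailStrategy::RETURN_NULL to AllocateHeap(), so a failed C_HEAP allocation surfaces as NULL and the caller must check the result. The following standalone mimic shows only that calling convention; CHeapMimic and Widget are hypothetical stand-ins (the real overload also takes the allocation_type and MEMFLAGS arguments shown in the diff):

#include <cstddef>
#include <cstdlib>
#include <new>

class CHeapMimic {
public:
  // Mirrors the shape of the new overload: on exhaustion it returns NULL
  // rather than exiting the VM or throwing std::bad_alloc.
  void* operator new(size_t size, const std::nothrow_t&) throw() {
    return malloc(size);  // malloc already yields NULL on failure
  }
  void operator delete(void* p) { free(p); }
};

class Widget : public CHeapMimic {
  int _x;
};

int main() {
  Widget* w = new (std::nothrow) Widget();  // may be NULL; must be checked
  if (w == NULL) return 1;
  delete w;
  return 0;
}

In HotSpot itself a call site would presumably spell out all the arguments, along the lines of new (std::nothrow, ResourceObj::C_HEAP, flags) SomeType(), and then test the pointer before use.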
@@ -504,19 +524,22 @@
 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
   vm_exit_out_of_memory(sz, whence);
 }
 
 // Grow a new Chunk
-void* Arena::grow( size_t x ) {
+void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   // Get minimal required size.  Either real big, or even bigger for giant objs
   size_t len = MAX2(x, (size_t) Chunk::size);
 
   Chunk *k = _chunk;            // Get filled-up chunk address
   _chunk = new (len) Chunk(len);
 
   if (_chunk == NULL) {
-    signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
+    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
+    }
+    return NULL;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
   else _first = _chunk;
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
...
 }
 
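Arena::grow() now takes an AllocFailType: EXIT_OOM preserves the old die-on-failure behavior via signal_out_of_memory(), while RETURN_NULL makes grow() return NULL so callers such as Amalloc() can propagate the failure upward. A compilable toy model of that contract follows; MiniArena and its Chunk are deliberate simplifications, not HotSpot's types:

#include <cstdio>
#include <cstdlib>

enum AllocFailType { EXIT_OOM, RETURN_NULL };

struct Chunk { Chunk* next; };

struct MiniArena {
  Chunk* _chunk;

  // Grow by one chunk. On failure: exit under EXIT_OOM (old behavior),
  // return NULL under RETURN_NULL (new behavior).
  void* grow(size_t len, AllocFailType mode) {
    Chunk* c = (Chunk*)malloc(sizeof(Chunk) + len);
    if (c == NULL) {
      if (mode == EXIT_OOM) {
        fprintf(stderr, "MiniArena::grow failed (%lu bytes)\n", (unsigned long)len);
        exit(1);
      }
      return NULL;
    }
    c->next = _chunk;        // append to the chunk list
    _chunk = c;
    return (void*)(c + 1);   // payload starts after the header
  }
};

int main() {
  MiniArena a = { NULL };
  void* p = a.grow(128, RETURN_NULL);  // NULL here is recoverable
  int ok = (p != NULL);
  free(a._chunk);                      // single-chunk cleanup for the demo
  return ok ? 0 : 1;
}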
@@ -531,13 +554,16 @@
 // Reallocate storage in Arena.
-void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
+void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
   assert(new_size >= 0, "bad size");
   if (new_size == 0) return NULL;
 #ifdef ASSERT
   if (UseMallocOnly) {
     // always allocate a new object (otherwise we'll free this one twice)
-    char* copy = (char*)Amalloc(new_size);
+    char* copy = (char*)Amalloc(new_size, alloc_failmode);
+    if (copy == NULL) {
+      return NULL;
+    }
     size_t n = MIN2(old_size, new_size);
     if (n > 0) memcpy(copy, old_ptr, n);
     Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
     return copy;
   }
@@ -559,11 +585,14 @@
     _hwm = c_old+corrected_new_size;    // Adjust hwm
     return c_old;               // Return old pointer
   }
 
   // Oops, got to relocate guts
-  void *new_ptr = Amalloc(new_size);
+  void *new_ptr = Amalloc(new_size, alloc_failmode);
+  if (new_ptr == NULL) {
+    return NULL;
+  }
   memcpy( new_ptr, c_old, old_size );
   Afree(c_old,old_size);        // Mostly done to keep stats accurate
   return new_ptr;
 }
 
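Both allocation points in Arealloc() now forward alloc_failmode and return NULL before the old block is freed, so a failed resize under RETURN_NULL leaves old_ptr and its contents intact for the caller. A standalone sketch of that allocate-copy-free ordering, using plain malloc/free as hypothetical stand-ins for Amalloc()/Afree():

#include <cstdlib>
#include <cstring>

// Mimics Arealloc's relocation path under RETURN_NULL: allocate first,
// and only free the old block once the copy has succeeded.
static void* arealloc_mimic(void* old_ptr, size_t old_size, size_t new_size) {
  void* copy = malloc(new_size);     // Amalloc(new_size, RETURN_NULL)
  if (copy == NULL) return NULL;     // old_ptr deliberately left untouched
  memcpy(copy, old_ptr, old_size < new_size ? old_size : new_size);
  free(old_ptr);                     // Afree(old_ptr, old_size)
  return copy;
}

int main() {
  char* buf = (char*)malloc(16);
  if (buf == NULL) return 1;
  void* bigger = arealloc_mimic(buf, 16, 64);
  if (bigger == NULL) {
    free(buf);  // buf survived the failed resize; recover gracefully
    return 0;
  }
  free(bigger);
  return 0;
}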