src/share/vm/memory/allocation.cpp @ 0:a61af66fc99e (jdk7-b24)
author: duke
date:   Sat, 01 Dec 2007 00:00:00 +0000
Initial load
/*
 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_allocation.cpp.incl"

void* CHeapObj::operator new(size_t size) {
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p) {
  FreeHeap(p);
}
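
// Illustrative usage (a sketch, not part of this file): classes whose
// instances live on the C heap derive from CHeapObj so that plain new/delete
// route through AllocateHeap/FreeHeap, e.g.
//
//   class MyTable : public CHeapObj { ... };   // MyTable is hypothetical
//   MyTable* t = new MyTable();                // goes through AllocateHeap
//   delete t;                                  // goes through FreeHeap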

void* StackObj::operator new(size_t size)   { ShouldNotCallThis(); return 0; };
void  StackObj::operator delete(void* p)    { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
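
// Note: StackObj and _ValueObj exist only to forbid heap allocation; their
// subclasses must be stack-allocated or embedded in other objects (e.g. a
// ResourceMark declared as a local variable), so operator new/delete simply
// assert if they are ever reached.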

void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    break;
   case RESOURCE_AREA:
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  // Set allocation type in the resource object for assertion checks.
  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  FreeHeap(p);
}
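
// Illustrative usage (a sketch, assuming the usual placement-new idiom): the
// allocation_type argument selects the backing store, e.g.
//
//   Foo* a = new (ResourceObj::C_HEAP) Foo();        // may be deleted
//   Foo* b = new (ResourceObj::RESOURCE_AREA) Foo(); // freed by ResourceMark
//
// where Foo is a hypothetical ResourceObj subclass; only the C_HEAP form may
// ever reach operator delete, as the assert above enforces.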

void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
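    // Note: the out-of-memory check below deliberately runs after the
    // ThreadCritical scope has closed, so vm_exit_out_of_memory is never
    // invoked while the pool lock is held.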
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, free all chunks beyond the first n
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
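      // Walk to the n-th cached chunk. Note: this assumes n >= 1; with n == 0
      // the unsigned expression (n - 1) would wrap around. The only caller,
      // ChunkPoolCleaner below, passes BlocksToKeep == 5.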
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while(cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;


void chunkpool_init() {
  ChunkPool::initialize();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000,   // cleaning interval in ms
         BlocksToKeep     = 5       // # of extra blocks to keep
  };

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::small_pool()->free_all_but(BlocksToKeep);
    ChunkPool::medium_pool()->free_all_but(BlocksToKeep);
    ChunkPool::large_pool()->free_all_but(BlocksToKeep);
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but in order for arena
  // allocations to come out aligned as expected, the chunk overhead must be
  // padded up to the expected arena alignment.
  // Normally requested_size is already the proper size; if sizeof(Chunk)
  // does not match the aligned overhead, ARENA_ALIGN fixes it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}
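
// Illustrative note: callers use the placement form `new (len) Chunk(len)`
// (see Arena::grow() below), so requests for the three standard sizes are
// served from the ChunkPools above, while odd sizes fall through to
// os::malloc in the default branch.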

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default: os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;   // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;     // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
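  // e.g. on a 64-bit VM round_size == 7, so an init_size of 100 is rounded
  // up to 104, the next multiple of sizeof(char*).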
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}
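
// Note: the Arena(Arena*) constructor above aliases the other arena's chunk
// list rather than copying it; after construction both arenas reference the
// same chunks, so only one of the two may ultimately destruct the contents
// (see move_contents below for the hand-off idiom).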

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm = _hwm;
  copy->_max = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}


// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While there are more Chunks in the chain
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}


// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL)
    vm_exit_out_of_memory(len + Chunk::aligned_overhead_size(), "Arena::grow");

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max  = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
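
// Context (a sketch of the fast path, which lives in allocation.hpp, not in
// this file): Arena::Amalloc and friends inline a pointer-bump allocation,
// roughly
//
//   if (_hwm + x > _max) return grow(x);   // slow path: get a new Chunk
//   else { char* old = _hwm; _hwm += x; return old; }
//
// so grow() above is reached only when the current chunk is exhausted.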


// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&             // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {  // Still fits where it sits
    _hwm = c_old+corrected_new_size;          // Adjust hwm
    return c_old;                             // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
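
// How the UseMallocOnly scheme fits together (a descriptive note, not new
// behavior): under UseMallocOnly each arena allocation is an individual
// os::malloc block, and Arena::malloc above records the returned pointer in
// the resource area itself; free_malloced_objects (below) later walks those
// recorded pointers and os::free's every block, which is what lets
// ResourceMark semantics work even though nothing was bump-allocated.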


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

size_t Arena::_bytes_allocated = 0;

AllocStats::AllocStats() {
  start_mallocs = os::num_mallocs;
  start_frees = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_res_bytes = Arena::_bytes_allocated;
}

int     AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
size_t  AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
size_t  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
int     AllocStats::num_frees()      { return os::num_frees - start_frees; }
void    AllocStats::print() {
  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
}
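
// Illustrative usage (a sketch; AllocStats simply snapshots the os counters
// at construction and prints deltas):
//
//   AllocStats stats;               // take a baseline
//   ...run the phase of interest...
//   stats.print();                  // e.g. "42 mallocs (13K), 7 frees, 128K resrc"
//
// The printed numbers above are made up for illustration.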


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
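
// Illustrative intent (a sketch): a growable, resource-allocated array embeds
// a ReallocMark and calls check() before each reallocation; if a nested
// ResourceMark was pushed since the array was created, growing would allocate
// from (and be freed with) the inner mark while stale elements survive, so
// check() turns that latent bug into a fatal error in debug builds.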

#endif // Non-product