annotate src/share/vm/memory/allocation.cpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to the incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour, where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so the statistical prediction can be selectively used, or not used, as appropriate at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction, which can be noisy in the presence of bursty loads. Also added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author:   ysr
date:     Tue, 07 Dec 2010 21:55:53 -0800
parents:  f95d63e2154a
children: 79d8657be916
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif

void* CHeapObj::operator new(size_t size){
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p){
  FreeHeap(p);
}

void* StackObj::operator new(size_t size)   { ShouldNotCallThis(); return 0; };
void  StackObj::operator delete(void* p)    { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };

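// Usage sketch for the allocation base classes above (NameTable and Walker
// are hypothetical types, shown only for illustration):
//
//   class NameTable : public CHeapObj { /* ... */ };
//   NameTable* t = new NameTable();   // storage comes from AllocateHeap()
//   delete t;                         // storage is returned via FreeHeap()
//
//   class Walker : public StackObj { /* ... */ };
//   Walker w;                         // fine: automatic (stack) storage
//   // "new Walker()" would trip ShouldNotCallThis() above.
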
void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

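// Usage sketch (Node is a hypothetical ResourceObj subclass): by default a
// ResourceObj lives in the current thread's resource area and is reclaimed
// wholesale when the enclosing ResourceMark is destroyed; passing C_HEAP to
// the placement form above selects malloc'ed storage instead.
//
//   class Node : public ResourceObj { /* ... */ };
//   { ResourceMark rm;
//     Node* a = new Node();                        // RESOURCE_AREA: no delete
//     Node* b = new (ResourceObj::C_HEAP) Node();  // C heap: must be deleted
//     delete b;                                    // ok: allocated_on_C_heap()
//     // "delete a" would fire the assert in ResourceObj::operator delete.
//   } // rm frees a's storage here
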
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ((ResourceObj *)res)->_allocation = ~(allocation + type);
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation) & allocation_mask);
}
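
// Worked example of the tag encoding above, assuming a 32-bit address and
// C_HEAP encoding to tag value 2 (the concrete enum values here are an
// assumption for illustration): for an object at address 0x1000,
//
//   _allocation = ~(0x1000 + 2) = 0xffffeffd
//
// get_allocation_type() then recovers (~_allocation) & allocation_mask == 2,
// and ~(_allocation | allocation_mask) == 0x1000 == this, which is exactly
// what the "lost resource object" assert verifies.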

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation | allocation_mask) != (uintptr_t)this) {
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) {
      // For some reason we got a value which looks like an allocation on stack.
      // Pass if it is really allocated on stack.
      assert(Thread::current()->on_local_stack((address)this),"should be on stack");
    } else {
      assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
             "allocation_type should be set by operator new()");
    }
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(), "copy only into local");
    // Keep current _allocation value;
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
      _allocation = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, free all but the first n of them
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while(cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}
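
// Recycling sketch: Chunk::operator new below draws standard-sized chunks
// from these pools, Chunk::operator delete puts them back, and the periodic
// cleaner trims each pool down to a few cached blocks. Roughly:
//
//   void* p = ChunkPool::small_pool()->allocate(bytes); // reuse or os::malloc
//   ChunkPool::small_pool()->free((Chunk*)p);           // cached, not freed
//   ChunkPool::small_pool()->free_all_but(5);           // prune the cache
//
// where bytes must equal Chunk::init_size + Chunk::aligned_overhead_size().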

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };        // cleaning interval in ms

 public:
   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
   void task() {
     ChunkPool::clean();
   }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to the expected arena alignment; so if sizeof(Chunk) isn't already the
  // proper size we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}
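
// Routing sketch: a request for one of the three standard lengths is served
// by the matching pool; any other length (for example an oversized
// Arena::grow() request) falls through to plain os::malloc():
//
//   Chunk* c = new (Chunk::medium_size) Chunk(Chunk::medium_size); // pooled
//   Chunk* d = new (1*M) Chunk(1*M);                               // malloc'ed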

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm = _hwm;
  copy->_max = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}


// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While there are Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}


// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL)
    vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
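
// For reference, the allocation fast path that pairs with grow(): Amalloc()
// (inline in allocation.hpp) just bumps _hwm and only calls grow() when the
// current chunk is exhausted, so a typical arena allocation is one comparison
// plus pointer arithmetic. A sketch of that fast path, simplified from the
// inline header (internal_malloc_4() below has the same shape):
//
//   if (_hwm + x > _max) return grow(x);   // slow path: chain a new Chunk
//   char* old = _hwm; _hwm += x;           // fast path: bump high-water mark
//   return old;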


// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
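
// Worked example (arena is a hypothetical Arena*): if the block being resized
// is the most recent allocation (c_old + old_size == _hwm) and the aligned new
// size still fits below _max, Arealloc() grows it in place by just moving
// _hwm; otherwise it falls back to allocate-copy-free:
//
//   char* p = (char*) arena->Amalloc(16);
//   p = (char*) arena->Arealloc(p, 16, 32);  // in place if p was on top and
//                                            // 32 aligned bytes still fit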


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

size_t Arena::_bytes_allocated = 0;

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

int    AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
size_t AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
size_t AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
int    AllocStats::num_frees()      { return os::num_frees - start_frees; }
void   AllocStats::print() {
  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
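
// Usage sketch (buf is a hypothetical resource-area char array): a ReallocMark
// taken when the array is created lets later grow operations assert that no
// nested ResourceMark has been pushed in between, since growing under a nested
// mark would allocate storage that is freed before the array itself dies:
//
//   ReallocMark nesting_check;       // records the current nesting level
//   // ... later, before growing the array ...
//   nesting_check.check();           // fatal() if the nesting level changed
//   buf = REALLOC_RESOURCE_ARRAY(char, buf, old_len, new_len);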

#endif // Non-product