graal-jvmci-8: annotate src/share/vm/memory/heap.cpp @ 1994:6cd6d394f280

7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to the incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour, where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction, which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp

author:    ysr
date:      Tue, 07 Dec 2010 21:55:53 -0800
parents:   f95d63e2154a
children:  51bd2d261853
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"


size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _free_segments                = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = 0xFF;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == 0xFF) i = 1;
  }
}
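
// Note on the segment map: each committed segment owns one byte in _segmap.
// A value of 0xFF marks a free segment; for a used block the bytes count
// 0, 1, 2, ... from the block's first segment, wrapping from 0xFE back to 1,
// so a lookup can always walk backwards to the block header in bounded hops.
// As a small illustration, a 5-segment block starting at segment 12 is marked
// 0, 1, 2, 3, 4 in _segmap[12..16]; from any of those segments, repeatedly
// subtracting the stored value reaches segment 12, where the HeapBlock header
// lives (see find_start() below).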


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


static size_t align_to_allocation_size(size_t size) {
  const size_t alignment = (size_t)os::vm_allocation_granularity();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, reserved_size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = number_of_segments(_memory.committed_size());
  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");

  // reserve space for _segmap
  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
    return false;
  }
  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}
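
// Page-size selection above, in brief: when the platform can execute code on
// large pages, os::page_size_for_region(committed_size, reserved_size, 8) is
// expected to pick the largest supported page size that still leaves at least
// 8 pages in the region; otherwise the default vm_page_size() is used. The
// reserved size is then rounded up to the larger of page size and allocation
// granularity, and the committed size to the page size. As a purely
// hypothetical illustration: with a 2M large page and a 32M reservation,
// r_size stays 32M and spans 16 large pages.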


void CodeHeap::release() {
  Unimplemented();
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = number_of_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}


void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}


void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t size) {
  size_t length = number_of_segments(size + sizeof(HeapBlock));
  assert(length * _segment_size >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy the request from the freelist
  debug_only(verify());
  HeapBlock* block = search_freelist(length);
  debug_only(if (VerifyCodeCacheOften) verify());
  if (block != NULL) {
    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must be marked used");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
#endif
    return block->allocated_space();
  }

  if (length < CodeCacheMinBlockLength) {
    length = CodeCacheMinBlockLength;
  }
  if (_next_segment + length <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + length);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(length);
    _next_segment += length;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
#endif
    return b->allocated_space();
  } else {
    return NULL;
  }
}
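
// Allocation strategy, in short: a request is rounded up to whole segments
// (plus one HeapBlock header), the freelist is searched for a best fit, and
// only if that fails is the block carved from the still-unused committed
// area starting at _next_segment. Requests never create blocks shorter than
// CodeCacheMinBlockLength segments, so every block can later be recycled
// through the freelist as a FreeBlock.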


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}


void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    return NULL;
  }
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}
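
// find_start() is the consumer of the segment map encoding described above:
// starting from the segment containing p, it keeps subtracting the stored
// backward distance until it reaches the segment whose map byte is 0, i.e.
// the segment holding the HeapBlock header, and returns that block's
// allocated space (or NULL for free or unmapped addresses).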


size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Finds the next used HeapBlock and returns its allocated space.
// If the given block is free, the following (used) block is returned instead.
void* CodeHeap::next_free(HeapBlock *b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock *CodeHeap::block_start(void *q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the HeapBlock following b, or NULL if b is the last block
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

size_t CodeHeap::allocated_capacity() const {
  // Start with the committed size in _memory;
  size_t l = _memory.committed_size();

  // Subtract the committed, but unused, segments
  l -= size(_number_of_committed_segments - _next_segment);

  // Subtract the size of the freelist
  l -= size(_free_segments);

  return l;
}
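
// In other words: allocated = committed
//                           - (committed segments not yet handed out)
//                           - (segments sitting on the freelist).
// Purely as a hypothetical illustration, with 1K segments, 1000 committed
// segments, _next_segment == 600 and 50 segments on the freelist, this
// reports 1000K - 400K - 50K = 550K of live allocations.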

// Free list management

FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}

void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _free_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for right place to put into list. List
  // is sorted by increasing addresses
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }

  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}
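
// Keeping the freelist sorted by address is what makes coalescing cheap:
// after an insert, merge_right() only has to check whether the block that
// physically follows the inserted one is also its successor on the list, in
// which case the two are fused into a single larger FreeBlock and the segment
// map is rewritten for the combined range.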

// Search freelist for an entry on the list with the best fit
// Return NULL if none was found
FreeBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;

  // Search for smallest block which is bigger than length
  FreeBlock *prev = NULL;
  FreeBlock *cur  = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {
      // Remember best block, its previous element, and its length
      best_block  = cur;
      best_prev   = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unlink element
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _free_segments -= length;
  return best_block;
}
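
// Two outcomes are possible above: if the best fit is within
// CodeCacheMinBlockLength segments of the request, the whole FreeBlock is
// handed out (slightly over-allocating rather than leaving an unusably small
// fragment on the list); otherwise the FreeBlock is shortened in place and
// the request is satisfied from its tail, so the list links of the remaining
// front part never need to change.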

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

#endif

void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that freelist contains the right amount of free space
  // guarantee(len == _free_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks that is
  // found on the full list.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  // guarantee(count == 0, "missing free blocks");
}
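
//-----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of heap.cpp): a client such as the
// code cache would be expected to drive the interface above roughly as shown
// below; the sizes and the segment size are hypothetical values chosen to
// satisfy the asserts in reserve().
//
//   CodeHeap heap;
//   if (!heap.reserve(32*M, 4*M, 64)) {    // reserved, committed, segment size
//     // bail out: could not reserve the code heap
//   }
//   void* p = heap.allocate(1024);         // best fit from freelist, else fresh segments
//   if (p == NULL && heap.expand_by(1*M)) {
//     p = heap.allocate(1024);             // retry after committing more space
//   }
//   ...
//   heap.deallocate(p);                    // block goes back on the freelist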
0 | 485 } |