Mercurial > hg > truffle
annotate src/share/vm/memory/heap.cpp @ 20543:e7d0505c8a30
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks. This is because they do not touch that memory at all, so the operating system does not actually commit these pages. The fix is to, if the initialization value of the data structures matches the default value of just committed memory (=0), do not do anything.
Reviewed-by: jwilhelm, brutisso
author | tschatzl |
---|---|
date | Fri, 10 Oct 2014 15:51:58 +0200 |
parents | 740e263c80c6 |
children | 58fc8e2b7b6d 63a4eb8bcd23 |
rev | line source |
---|---|
0 | 1 /* |
12056
740e263c80c6
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
10114
diff
changeset
|
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1123
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1123
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1123
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "memory/heap.hpp" | |
27 #include "oops/oop.inline.hpp" | |
28 #include "runtime/os.hpp" | |
6197 | 29 #include "services/memTracker.hpp" |
0 | 30 |
// Returns the size of the bookkeeping header that precedes the user
// area of every allocated block.
size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}
34 | |
35 | |
36 // Implementation of Heap | |
37 | |
38 CodeHeap::CodeHeap() { | |
39 _number_of_committed_segments = 0; | |
40 _number_of_reserved_segments = 0; | |
41 _segment_size = 0; | |
42 _log2_segment_size = 0; | |
43 _next_segment = 0; | |
44 _freelist = NULL; | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
45 _freelist_segments = 0; |
0 | 46 } |
47 | |
48 | |
49 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) { | |
50 assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); | |
51 assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds"); | |
52 // setup _segmap pointers for faster indexing | |
53 address p = (address)_segmap.low() + beg; | |
54 address q = (address)_segmap.low() + end; | |
55 // initialize interval | |
56 while (p < q) *p++ = 0xFF; | |
57 } | |
58 | |
59 | |
60 void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) { | |
61 assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); | |
62 assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds"); | |
63 // setup _segmap pointers for faster indexing | |
64 address p = (address)_segmap.low() + beg; | |
65 address q = (address)_segmap.low() + end; | |
66 // initialize interval | |
67 int i = 0; | |
68 while (p < q) { | |
69 *p++ = i++; | |
70 if (i == 0xFF) i = 1; | |
71 } | |
72 } | |
73 | |
74 | |
75 static size_t align_to_page_size(size_t size) { | |
76 const size_t alignment = (size_t)os::vm_page_size(); | |
77 assert(is_power_of_2(alignment), "no kidding ???"); | |
78 return (size + alignment - 1) & ~(alignment - 1); | |
79 } | |
80 | |
81 | |
// Hook called whenever a new range of code memory is mapped or committed.
// On Linux it forwards to linux_wrap_code(); on all other platforms it is
// a no-op.
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
88 | |
89 | |
// Reserves 'reserved_size' bytes of address space for the code heap and
// commits the first 'committed_size' bytes, carving the heap into
// power-of-two 'segment_size' chunks. Also reserves/commits the parallel
// segment map (_segmap) used by find_start(). Returns false if either
// the heap memory or the segment map could not be initialized.
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  // Use large pages if the platform can execute code from them; the
  // region must accommodate at least 8 pages (see page_size_for_region).
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, reserved_size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  // Only request an explicit alignment when using pages larger than the
  // default vm page size.
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  // One segment-map byte per segment, rounded up to allocation granularity
  // (reserved part) and page size (committed part).
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");

  // initialize remaining instance variables (marks all segments free)
  clear();
  return true;
}
140 | |
141 | |
// Releasing the code heap is not supported; calling this aborts the VM.
void CodeHeap::release() {
  Unimplemented();
}
145 | |
146 | |
// Grows the committed portion of the heap by at least 'size' bytes
// (rounded up to the page size) and commits enough of the segment map to
// cover the newly committed segments, marking them free. Returns false
// if committing either region fails.
bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    // Remember the old segment count so only the new tail is re-marked.
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}
169 | |
170 | |
// Shrinking the code heap is not supported; calling this aborts the VM.
void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}
174 | |
175 | |
// Resets the heap to empty: allocation restarts at segment 0 and all
// committed segments are marked free. No memory is uncommitted.
void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}
180 | |
181 | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
182 void* CodeHeap::allocate(size_t instance_size, bool is_critical) { |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
183 size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
184 assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); |
0 | 185 |
186 // First check if we can satify request from freelist | |
187 debug_only(verify()); | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
188 HeapBlock* block = search_freelist(number_of_segments, is_critical); |
0 | 189 debug_only(if (VerifyCodeCacheOften) verify()); |
190 if (block != NULL) { | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
191 assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check"); |
0 | 192 assert(!block->free(), "must be marked free"); |
193 #ifdef ASSERT | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
194 memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); |
0 | 195 #endif |
196 return block->allocated_space(); | |
197 } | |
198 | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
199 // Ensure minimum size for allocation to the heap. |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
200 if (number_of_segments < CodeCacheMinBlockLength) { |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
201 number_of_segments = CodeCacheMinBlockLength; |
0 | 202 } |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
203 |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
204 if (!is_critical) { |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
205 // Make sure the allocation fits in the unallocated heap without using |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
206 // the CodeCacheMimimumFreeSpace that is reserved for critical allocations. |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
207 if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) { |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
208 // Fail allocation |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
209 return NULL; |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
210 } |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
211 } |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
212 |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
213 if (_next_segment + number_of_segments <= _number_of_committed_segments) { |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
214 mark_segmap_as_used(_next_segment, _next_segment + number_of_segments); |
0 | 215 HeapBlock* b = block_at(_next_segment); |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
216 b->initialize(number_of_segments); |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
217 _next_segment += number_of_segments; |
0 | 218 #ifdef ASSERT |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
219 memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); |
0 | 220 #endif |
221 return b->allocated_space(); | |
222 } else { | |
223 return NULL; | |
224 } | |
225 } | |
226 | |
227 | |
// Returns the block whose user area starts at 'p' to the freelist.
// 'p' must be exactly the address handed out by allocate().
void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock: the header sits immediately before the user area.
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  // Poison the freed user area so stale uses are easier to spot in
  // debug builds.
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}
242 | |
243 | |
// Maps an arbitrary address inside the heap to the start of the user
// area of the enclosing allocated block. Returns NULL if 'p' is outside
// the heap or points into free space.
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    // 0xFF marks a free segment (see mark_segmap_as_free).
    return NULL;
  }
  // Used segments store backward steps toward the block's first segment,
  // whose entry is 0 (see mark_segmap_as_used); follow them to the start.
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}
260 | |
261 | |
// Returns the allocation alignment unit of this heap.
size_t CodeHeap::alignment_unit() const {
  // this will be a power of two (enforced in reserve())
  return _segment_size;
}
266 | |
267 | |
// Returns the offset of user data relative to a segment boundary.
size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit): the user area sits
  // sizeof(HeapBlock) bytes past the start of each block's first segment.
  return sizeof(HeapBlock) & (_segment_size - 1);
}
273 | |
// Skips over a free block, if any, and returns the user area of the next
// used block (or of 'b' itself when 'b' is already in use). Returns NULL
// at the end of the heap.
void* CodeHeap::next_free(HeapBlock *b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones.
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}
282 | |
283 // Returns the first used HeapBlock | |
284 HeapBlock* CodeHeap::first_block() const { | |
285 if (_next_segment > 0) | |
286 return block_at(0); | |
287 return NULL; | |
288 } | |
289 | |
290 HeapBlock *CodeHeap::block_start(void *q) const { | |
291 HeapBlock* b = (HeapBlock*)find_start(q); | |
292 if (b == NULL) return NULL; | |
293 return b - 1; | |
294 } | |
295 | |
296 // Returns the next Heap block an offset into one | |
297 HeapBlock* CodeHeap::next_block(HeapBlock *b) const { | |
298 if (b == NULL) return NULL; | |
299 size_t i = segment_for(b) + b->length(); | |
300 if (i < _next_segment) | |
301 return block_at(i); | |
302 return NULL; | |
303 } | |
304 | |
305 | |
// Returns current capacity, i.e. the committed part of the heap; this
// grows as expand_by() commits more of the reserved space.
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}
310 | |
// Returns the maximum capacity: the entire reserved address range.
size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}
314 | |
315 size_t CodeHeap::allocated_capacity() const { | |
10114
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
316 // size of used heap - size on freelist |
a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
neliasso
parents:
9060
diff
changeset
|
317 return segments_to_size(_next_segment - _freelist_segments); |
0 | 318 } |
319 | |
// Returns size of the unallocated heap block: segments past _next_segment
// have never been handed out (they may not all be committed yet).
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}
51bd2d261853
7008325: CodeCache exhausted on sparc starting from hs20b04
kvn
parents:
1972
diff
changeset
|
325 |
0 | 326 // Free list management |
327 | |
// Returns the block physically adjacent after 'b' (blocks are laid out
// contiguously, so this is 'b' plus its own byte size).
FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}
331 | |
// Inserts block b after a in the freelist (which is sorted by address,
// see add_to_freelist), then coalesces physically adjacent neighbors.
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}
344 | |
// Try to merge this block with the following block; they coalesce only
// when the list successor is also the physical successor.
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map so interior segments point back to a's start
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}
358 | |
// Returns block 'a' to the freelist, keeping the list sorted by
// increasing address and merging with physically adjacent free blocks.
void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  // NOTE(review): message says "removed" but this guards against adding
  // the current list head a second time — confirm intended wording.
  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for right place to put into list. List
  // is sorted by increasing addresses
  FreeBlock* prev = NULL;
  FreeBlock* cur = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur = cur->link();
  }

  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}
396 | |
// Search freelist for an entry on the list with the best fit.
// Return NULL if no one was found.
// 'length' is a segment count. On success the chosen block (or the tail
// split off a larger one) is removed from the list, marked used, and
// _freelist_segments is reduced accordingly.
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev = NULL;
  size_t best_length = 0;

  // Search for smallest block which is bigger than length
  FreeBlock *prev = NULL;
  FreeBlock *cur = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {

      // Non critical allocations are not allowed to use the last part of the code heap.
      if (!is_critical) {
        // Make sure the end of the allocation doesn't cross into the last part of the code heap
        // NOTE(review): 'length' is a segment count but is added to a byte
        // address here — verify whether segments_to_size(length) was intended.
        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          // the freelist is sorted by address - if one fails, all consecutive will also fail.
          break;
        }
      }

      // Remember best block, its previous element, and its length
      best_block = cur;
      best_prev = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unmap element
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block;
    // the shrunken front part stays on the freelist.
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _freelist_segments -= length;
  return best_block;
}
464 | |
465 //---------------------------------------------------------------------------- | |
466 // Non-product code | |
467 | |
468 #ifndef PRODUCT | |
469 | |
// Minimal debug printout (non-product builds only).
void CodeHeap::print() {
  tty->print_cr("The Heap");
}
473 | |
474 #endif | |
475 | |
// Debug consistency check: tallies freelist blocks and free space, warns
// when the freelist grows suspiciously long, and cross-checks the free
// block count against a full heap walk. The strict guarantees are
// currently commented out.
void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;   // total free segments; only used by the disabled guarantee below
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that freelist contains the right amount of free space
  // guarantee(len == _freelist_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks that is
  // found on the full list.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  // guarantee(count == 0, "missing free blocks");
}