Mercurial > hg > graal-compiler
comparison src/share/vm/memory/heap.cpp @ 10114:a7fb14888912
8006952: Slow VM due to excessive code cache freelist iteration
Summary: Remove continuous free block requirement
Reviewed-by: kvn
author | neliasso |
---|---|
date | Thu, 11 Apr 2013 13:57:44 +0200 |
parents | cc32ccaaf47f |
children | 836a62f43af9 740e263c80c6 |
comparison
equal
deleted
inserted
replaced
10113:4b2eebe03f93 | 10114:a7fb14888912 |
---|---|
40 _number_of_reserved_segments = 0; | 40 _number_of_reserved_segments = 0; |
41 _segment_size = 0; | 41 _segment_size = 0; |
42 _log2_segment_size = 0; | 42 _log2_segment_size = 0; |
43 _next_segment = 0; | 43 _next_segment = 0; |
44 _freelist = NULL; | 44 _freelist = NULL; |
45 _free_segments = 0; | 45 _freelist_segments = 0; |
46 } | 46 } |
47 | 47 |
48 | 48 |
49 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) { | 49 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) { |
50 assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); | 50 assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); |
113 if (!_memory.initialize(rs, c_size)) { | 113 if (!_memory.initialize(rs, c_size)) { |
114 return false; | 114 return false; |
115 } | 115 } |
116 | 116 |
117 on_code_mapping(_memory.low(), _memory.committed_size()); | 117 on_code_mapping(_memory.low(), _memory.committed_size()); |
118 _number_of_committed_segments = number_of_segments(_memory.committed_size()); | 118 _number_of_committed_segments = size_to_segments(_memory.committed_size()); |
119 _number_of_reserved_segments = number_of_segments(_memory.reserved_size()); | 119 _number_of_reserved_segments = size_to_segments(_memory.reserved_size()); |
120 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); | 120 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); |
121 | 121 |
122 // reserve space for _segmap | 122 // reserve space for _segmap |
123 if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) { | 123 if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) { |
124 return false; | 124 return false; |
147 if (dm > 0) { | 147 if (dm > 0) { |
148 char* base = _memory.low() + _memory.committed_size(); | 148 char* base = _memory.low() + _memory.committed_size(); |
149 if (!_memory.expand_by(dm)) return false; | 149 if (!_memory.expand_by(dm)) return false; |
150 on_code_mapping(base, dm); | 150 on_code_mapping(base, dm); |
151 size_t i = _number_of_committed_segments; | 151 size_t i = _number_of_committed_segments; |
152 _number_of_committed_segments = number_of_segments(_memory.committed_size()); | 152 _number_of_committed_segments = size_to_segments(_memory.committed_size()); |
153 assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change"); | 153 assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change"); |
154 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); | 154 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); |
155 // expand _segmap space | 155 // expand _segmap space |
156 size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size(); | 156 size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size(); |
157 if (ds > 0) { | 157 if (ds > 0) { |
158 if (!_segmap.expand_by(ds)) return false; | 158 if (!_segmap.expand_by(ds)) return false; |
174 _next_segment = 0; | 174 _next_segment = 0; |
175 mark_segmap_as_free(0, _number_of_committed_segments); | 175 mark_segmap_as_free(0, _number_of_committed_segments); |
176 } | 176 } |
177 | 177 |
178 | 178 |
179 void* CodeHeap::allocate(size_t size) { | 179 void* CodeHeap::allocate(size_t instance_size, bool is_critical) { |
180 size_t length = number_of_segments(size + sizeof(HeapBlock)); | 180 size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); |
181 assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList"); | 181 assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); |
182 | 182 |
183 // First check if we can satify request from freelist | 183 // First check if we can satify request from freelist |
184 debug_only(verify()); | 184 debug_only(verify()); |
185 HeapBlock* block = search_freelist(length); | 185 HeapBlock* block = search_freelist(number_of_segments, is_critical); |
186 debug_only(if (VerifyCodeCacheOften) verify()); | 186 debug_only(if (VerifyCodeCacheOften) verify()); |
187 if (block != NULL) { | 187 if (block != NULL) { |
188 assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check"); | 188 assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check"); |
189 assert(!block->free(), "must be marked free"); | 189 assert(!block->free(), "must be marked free"); |
190 #ifdef ASSERT | 190 #ifdef ASSERT |
191 memset((void *)block->allocated_space(), badCodeHeapNewVal, size); | 191 memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); |
192 #endif | 192 #endif |
193 return block->allocated_space(); | 193 return block->allocated_space(); |
194 } | 194 } |
195 | 195 |
196 if (length < CodeCacheMinBlockLength) { | 196 // Ensure minimum size for allocation to the heap. |
197 length = CodeCacheMinBlockLength; | 197 if (number_of_segments < CodeCacheMinBlockLength) { |
198 } | 198 number_of_segments = CodeCacheMinBlockLength; |
199 if (_next_segment + length <= _number_of_committed_segments) { | 199 } |
200 mark_segmap_as_used(_next_segment, _next_segment + length); | 200 |
201 if (!is_critical) { | |
202 // Make sure the allocation fits in the unallocated heap without using | |
203 // the CodeCacheMimimumFreeSpace that is reserved for critical allocations. | |
204 if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) { | |
205 // Fail allocation | |
206 return NULL; | |
207 } | |
208 } | |
209 | |
210 if (_next_segment + number_of_segments <= _number_of_committed_segments) { | |
211 mark_segmap_as_used(_next_segment, _next_segment + number_of_segments); | |
201 HeapBlock* b = block_at(_next_segment); | 212 HeapBlock* b = block_at(_next_segment); |
202 b->initialize(length); | 213 b->initialize(number_of_segments); |
203 _next_segment += length; | 214 _next_segment += number_of_segments; |
204 #ifdef ASSERT | 215 #ifdef ASSERT |
205 memset((void *)b->allocated_space(), badCodeHeapNewVal, size); | 216 memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); |
206 #endif | 217 #endif |
207 return b->allocated_space(); | 218 return b->allocated_space(); |
208 } else { | 219 } else { |
209 return NULL; | 220 return NULL; |
210 } | 221 } |
217 HeapBlock* b = (((HeapBlock *)p) - 1); | 228 HeapBlock* b = (((HeapBlock *)p) - 1); |
218 assert(b->allocated_space() == p, "sanity check"); | 229 assert(b->allocated_space() == p, "sanity check"); |
219 #ifdef ASSERT | 230 #ifdef ASSERT |
220 memset((void *)b->allocated_space(), | 231 memset((void *)b->allocated_space(), |
221 badCodeHeapFreeVal, | 232 badCodeHeapFreeVal, |
222 size(b->length()) - sizeof(HeapBlock)); | 233 segments_to_size(b->length()) - sizeof(HeapBlock)); |
223 #endif | 234 #endif |
224 add_to_freelist(b); | 235 add_to_freelist(b); |
225 | 236 |
226 debug_only(if (VerifyCodeCacheOften) verify()); | 237 debug_only(if (VerifyCodeCacheOften) verify()); |
227 } | 238 } |
297 size_t CodeHeap::max_capacity() const { | 308 size_t CodeHeap::max_capacity() const { |
298 return _memory.reserved_size(); | 309 return _memory.reserved_size(); |
299 } | 310 } |
300 | 311 |
301 size_t CodeHeap::allocated_capacity() const { | 312 size_t CodeHeap::allocated_capacity() const { |
302 // Start with the committed size in _memory; | 313 // size of used heap - size on freelist |
303 size_t l = _memory.committed_size(); | 314 return segments_to_size(_next_segment - _freelist_segments); |
304 | 315 } |
305 // Subtract the committed, but unused, segments | 316 |
306 l -= size(_number_of_committed_segments - _next_segment); | 317 // Returns size of the unallocated heap block |
307 | 318 size_t CodeHeap::heap_unallocated_capacity() const { |
308 // Subtract the size of the freelist | 319 // Total number of segments - number currently used |
309 l -= size(_free_segments); | 320 return segments_to_size(_number_of_reserved_segments - _next_segment); |
310 | |
311 return l; | |
312 } | |
313 | |
314 size_t CodeHeap::largest_free_block() const { | |
315 // First check unused space excluding free blocks. | |
316 size_t free_sz = size(_free_segments); | |
317 size_t unused = max_capacity() - allocated_capacity() - free_sz; | |
318 if (unused >= free_sz) | |
319 return unused; | |
320 | |
321 // Now check largest free block. | |
322 size_t len = 0; | |
323 for (FreeBlock* b = _freelist; b != NULL; b = b->link()) { | |
324 if (b->length() > len) | |
325 len = b->length(); | |
326 } | |
327 return MAX2(unused, size(len)); | |
328 } | 321 } |
329 | 322 |
330 // Free list management | 323 // Free list management |
331 | 324 |
332 FreeBlock *CodeHeap::following_block(FreeBlock *b) { | 325 FreeBlock *CodeHeap::following_block(FreeBlock *b) { |
363 void CodeHeap::add_to_freelist(HeapBlock *a) { | 356 void CodeHeap::add_to_freelist(HeapBlock *a) { |
364 FreeBlock* b = (FreeBlock*)a; | 357 FreeBlock* b = (FreeBlock*)a; |
365 assert(b != _freelist, "cannot be removed twice"); | 358 assert(b != _freelist, "cannot be removed twice"); |
366 | 359 |
367 // Mark as free and update free space count | 360 // Mark as free and update free space count |
368 _free_segments += b->length(); | 361 _freelist_segments += b->length(); |
369 b->set_free(); | 362 b->set_free(); |
370 | 363 |
371 // First element in list? | 364 // First element in list? |
372 if (_freelist == NULL) { | 365 if (_freelist == NULL) { |
373 _freelist = b; | 366 _freelist = b; |
398 } | 391 } |
399 } | 392 } |
400 | 393 |
401 // Search freelist for an entry on the list with the best fit | 394 // Search freelist for an entry on the list with the best fit |
402 // Return NULL if no one was found | 395 // Return NULL if no one was found |
403 FreeBlock* CodeHeap::search_freelist(size_t length) { | 396 FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) { |
404 FreeBlock *best_block = NULL; | 397 FreeBlock *best_block = NULL; |
405 FreeBlock *best_prev = NULL; | 398 FreeBlock *best_prev = NULL; |
406 size_t best_length = 0; | 399 size_t best_length = 0; |
407 | 400 |
408 // Search for smallest block which is bigger than length | 401 // Search for smallest block which is bigger than length |
409 FreeBlock *prev = NULL; | 402 FreeBlock *prev = NULL; |
410 FreeBlock *cur = _freelist; | 403 FreeBlock *cur = _freelist; |
411 while(cur != NULL) { | 404 while(cur != NULL) { |
412 size_t l = cur->length(); | 405 size_t l = cur->length(); |
413 if (l >= length && (best_block == NULL || best_length > l)) { | 406 if (l >= length && (best_block == NULL || best_length > l)) { |
407 | |
408 // Non critical allocations are not allowed to use the last part of the code heap. | |
409 if (!is_critical) { | |
410 // Make sure the end of the allocation doesn't cross into the last part of the code heap | |
411 if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) { | |
412 // the freelist is sorted by address - if one fails, all consecutive will also fail. | |
413 break; | |
414 } | |
415 } | |
416 | |
414 // Remember best block, its previous element, and its length | 417 // Remember best block, its previous element, and its length |
415 best_block = cur; | 418 best_block = cur; |
416 best_prev = prev; | 419 best_prev = prev; |
417 best_length = best_block->length(); | 420 best_length = best_block->length(); |
418 } | 421 } |
450 mark_segmap_as_used(beg, beg + length); | 453 mark_segmap_as_used(beg, beg + length); |
451 best_block->set_length(length); | 454 best_block->set_length(length); |
452 } | 455 } |
453 | 456 |
454 best_block->set_used(); | 457 best_block->set_used(); |
455 _free_segments -= length; | 458 _freelist_segments -= length; |
456 return best_block; | 459 return best_block; |
457 } | 460 } |
458 | 461 |
459 //---------------------------------------------------------------------------- | 462 //---------------------------------------------------------------------------- |
460 // Non-product code | 463 // Non-product code |
476 len += b->length(); | 479 len += b->length(); |
477 count++; | 480 count++; |
478 } | 481 } |
479 | 482 |
480 // Verify that freelist contains the right amount of free space | 483 // Verify that freelist contains the right amount of free space |
481 // guarantee(len == _free_segments, "wrong freelist"); | 484 // guarantee(len == _freelist_segments, "wrong freelist"); |
482 | 485 |
483 // Verify that the number of free blocks is not out of hand. | 486 // Verify that the number of free blocks is not out of hand. |
484 static int free_block_threshold = 10000; | 487 static int free_block_threshold = 10000; |
485 if (count > free_block_threshold) { | 488 if (count > free_block_threshold) { |
486 warning("CodeHeap: # of free blocks > %d", free_block_threshold); | 489 warning("CodeHeap: # of free blocks > %d", free_block_threshold); |