Mercurial > hg > graal-compiler
comparison src/share/vm/code/codeCache.cpp @ 14687:480b0109db65
8029799: vm/mlvm/anonloader/stress/oome prints warning: CodeHeap: # of free blocks > 10000
Summary: Double CodeCacheSegmentSize from 64 bytes to 128 bytes if tiered compilation is enabled
Reviewed-by: kvn, twisti
author | anoll |
---|---|
date | Fri, 07 Mar 2014 07:42:40 +0100 |
parents | 524b54a7f1b5 |
children | b51e29501f30 |
comparison
equal
deleted
inserted
replaced
14686:97586c131ac8 | 14687:480b0109db65 |
---|---|
196 (address)_heap->high() - (address)_heap->low_boundary()); | 196 (address)_heap->high() - (address)_heap->low_boundary()); |
197 } | 197 } |
198 } | 198 } |
199 maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - | 199 maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - |
200 (address)_heap->low_boundary()) - unallocated_capacity()); | 200 (address)_heap->low_boundary()) - unallocated_capacity()); |
201 verify_if_often(); | |
202 print_trace("allocation", cb, size); | 201 print_trace("allocation", cb, size); |
203 return cb; | 202 return cb; |
204 } | 203 } |
205 | 204 |
206 void CodeCache::free(CodeBlob* cb) { | 205 void CodeCache::free(CodeBlob* cb) { |
207 assert_locked_or_safepoint(CodeCache_lock); | 206 assert_locked_or_safepoint(CodeCache_lock); |
208 verify_if_often(); | |
209 | 207 |
210 print_trace("free", cb); | 208 print_trace("free", cb); |
211 if (cb->is_nmethod()) { | 209 if (cb->is_nmethod()) { |
212 _number_of_nmethods--; | 210 _number_of_nmethods--; |
213 if (((nmethod *)cb)->has_dependencies()) { | 211 if (((nmethod *)cb)->has_dependencies()) { |
219 } | 217 } |
220 _number_of_blobs--; | 218 _number_of_blobs--; |
221 | 219 |
222 _heap->deallocate(cb); | 220 _heap->deallocate(cb); |
223 | 221 |
224 verify_if_often(); | |
225 assert(_number_of_blobs >= 0, "sanity check"); | 222 assert(_number_of_blobs >= 0, "sanity check"); |
226 } | 223 } |
227 | 224 |
228 | 225 |
229 void CodeCache::commit(CodeBlob* cb) { | 226 void CodeCache::commit(CodeBlob* cb) { |
239 _number_of_adapters++; | 236 _number_of_adapters++; |
240 } | 237 } |
241 | 238 |
242 // flush the hardware I-cache | 239 // flush the hardware I-cache |
243 ICache::invalidate_range(cb->content_begin(), cb->content_size()); | 240 ICache::invalidate_range(cb->content_begin(), cb->content_size()); |
244 } | |
245 | |
246 | |
247 void CodeCache::flush() { | |
248 assert_locked_or_safepoint(CodeCache_lock); | |
249 Unimplemented(); | |
250 } | 241 } |
251 | 242 |
252 | 243 |
253 // Iteration over CodeBlobs | 244 // Iteration over CodeBlobs |
254 | 245 |
267 // looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain | 258 // looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain |
268 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. | 259 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. |
269 CodeBlob* CodeCache::find_blob(void* start) { | 260 CodeBlob* CodeCache::find_blob(void* start) { |
270 CodeBlob* result = find_blob_unsafe(start); | 261 CodeBlob* result = find_blob_unsafe(start); |
271 if (result == NULL) return NULL; | 262 if (result == NULL) return NULL; |
272 // We could potientially look up non_entrant methods | 263 // We could potentially look up non_entrant methods |
273 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method"); | 264 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method"); |
274 return result; | 265 return result; |
275 } | 266 } |
276 | 267 |
277 nmethod* CodeCache::find_nmethod(void* start) { | 268 nmethod* CodeCache::find_nmethod(void* start) { |
739 event.set_fullCount(_codemem_full_count); | 730 event.set_fullCount(_codemem_full_count); |
740 event.commit(); | 731 event.commit(); |
741 } | 732 } |
742 } | 733 } |
743 | 734 |
735 void CodeCache::print_memory_overhead() { | |
736 size_t wasted_bytes = 0; | |
737 CodeBlob *cb; | |
738 for (cb = first(); cb != NULL; cb = next(cb)) { | |
739 HeapBlock* heap_block = ((HeapBlock*)cb) - 1; | |
740 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); | |
741 } | |
742 // Print bytes that are allocated in the freelist | |
743 ttyLocker ttl; | |
744 tty->print_cr("Number of elements in freelist: %d", freelist_length()); | |
745 tty->print_cr("Allocated in freelist: %dkB", bytes_allocated_in_freelist()/K); | |
746 tty->print_cr("Unused bytes in CodeBlobs: %dkB", (int)(wasted_bytes/K)); | |
747 tty->print_cr("Segment map size: %dkB", allocated_segments()/K); // 1 byte per segment | |
748 } | |
749 | |
744 //------------------------------------------------------------------------------------------------ | 750 //------------------------------------------------------------------------------------------------ |
745 // Non-product version | 751 // Non-product version |
746 | 752 |
747 #ifndef PRODUCT | 753 #ifndef PRODUCT |
748 | |
749 void CodeCache::verify_if_often() { | |
750 if (VerifyCodeCacheOften) { | |
751 _heap->verify(); | |
752 } | |
753 } | |
754 | 754 |
755 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) { | 755 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) { |
756 if (PrintCodeCache2) { // Need to add a new flag | 756 if (PrintCodeCache2) { // Need to add a new flag |
757 ResourceMark rm; | 757 ResourceMark rm; |
758 if (size == 0) size = cb->size(); | 758 if (size == 0) size = cb->size(); |
772 int nmethodNotEntrant = 0; | 772 int nmethodNotEntrant = 0; |
773 int nmethodZombie = 0; | 773 int nmethodZombie = 0; |
774 int nmethodUnloaded = 0; | 774 int nmethodUnloaded = 0; |
775 int nmethodJava = 0; | 775 int nmethodJava = 0; |
776 int nmethodNative = 0; | 776 int nmethodNative = 0; |
777 int maxCodeSize = 0; | 777 int max_nm_size = 0; |
778 ResourceMark rm; | 778 ResourceMark rm; |
779 | 779 |
780 CodeBlob *cb; | 780 CodeBlob *cb; |
781 for (cb = first(); cb != NULL; cb = next(cb)) { | 781 for (cb = first(); cb != NULL; cb = next(cb)) { |
782 total++; | 782 total++; |
796 | 796 |
797 if(nm->is_alive()) { nmethodAlive++; } | 797 if(nm->is_alive()) { nmethodAlive++; } |
798 if(nm->is_not_entrant()) { nmethodNotEntrant++; } | 798 if(nm->is_not_entrant()) { nmethodNotEntrant++; } |
799 if(nm->is_zombie()) { nmethodZombie++; } | 799 if(nm->is_zombie()) { nmethodZombie++; } |
800 if(nm->is_unloaded()) { nmethodUnloaded++; } | 800 if(nm->is_unloaded()) { nmethodUnloaded++; } |
801 if(nm->is_native_method()) { nmethodNative++; } | 801 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; } |
802 | 802 |
803 if(nm->method() != NULL && nm->is_java_method()) { | 803 if(nm->method() != NULL && nm->is_java_method()) { |
804 nmethodJava++; | 804 nmethodJava++; |
805 if (nm->insts_size() > maxCodeSize) { | 805 max_nm_size = MAX2(max_nm_size, nm->size()); |
806 maxCodeSize = nm->insts_size(); | |
807 } | |
808 } | 806 } |
809 } else if (cb->is_runtime_stub()) { | 807 } else if (cb->is_runtime_stub()) { |
810 runtimeStubCount++; | 808 runtimeStubCount++; |
811 } else if (cb->is_deoptimization_stub()) { | 809 } else if (cb->is_deoptimization_stub()) { |
812 deoptimizationStubCount++; | 810 deoptimizationStubCount++; |
818 bufferBlobCount++; | 816 bufferBlobCount++; |
819 } | 817 } |
820 } | 818 } |
821 | 819 |
822 int bucketSize = 512; | 820 int bucketSize = 512; |
823 int bucketLimit = maxCodeSize / bucketSize + 1; | 821 int bucketLimit = max_nm_size / bucketSize + 1; |
824 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); | 822 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); |
825 memset(buckets,0,sizeof(int) * bucketLimit); | 823 memset(buckets, 0, sizeof(int) * bucketLimit); |
826 | 824 |
827 for (cb = first(); cb != NULL; cb = next(cb)) { | 825 for (cb = first(); cb != NULL; cb = next(cb)) { |
828 if (cb->is_nmethod()) { | 826 if (cb->is_nmethod()) { |
829 nmethod* nm = (nmethod*)cb; | 827 nmethod* nm = (nmethod*)cb; |
830 if(nm->is_java_method()) { | 828 if(nm->is_java_method()) { |
831 buckets[nm->insts_size() / bucketSize]++; | 829 buckets[nm->size() / bucketSize]++; |
832 } | 830 } |
833 } | 831 } |
834 } | 832 } |
833 | |
835 tty->print_cr("Code Cache Entries (total of %d)",total); | 834 tty->print_cr("Code Cache Entries (total of %d)",total); |
836 tty->print_cr("-------------------------------------------------"); | 835 tty->print_cr("-------------------------------------------------"); |
837 tty->print_cr("nmethods: %d",nmethodCount); | 836 tty->print_cr("nmethods: %d",nmethodCount); |
838 tty->print_cr("\talive: %d",nmethodAlive); | 837 tty->print_cr("\talive: %d",nmethodAlive); |
839 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); | 838 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); |
856 tty->print_cr("%d",buckets[i]); | 855 tty->print_cr("%d",buckets[i]); |
857 } | 856 } |
858 } | 857 } |
859 | 858 |
860 FREE_C_HEAP_ARRAY(int, buckets, mtCode); | 859 FREE_C_HEAP_ARRAY(int, buckets, mtCode); |
860 print_memory_overhead(); | |
861 } | 861 } |
862 | 862 |
863 #endif // !PRODUCT | 863 #endif // !PRODUCT |
864 | 864 |
865 void CodeCache::print() { | 865 void CodeCache::print() { |