comparison src/share/vm/services/memBaseline.cpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author | Gilles Duboscq <gilles.m.duboscq@oracle.com> |
date | Tue, 07 Apr 2015 14:58:49 +0200 |
parents | dd3939fe8424 |
children |
20184:84105dcdb05b | 20804:7848fc12602b |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
20 * or visit www.oracle.com if you need additional information or have any | 20 * or visit www.oracle.com if you need additional information or have any |
21 * questions. | 21 * questions. |
22 * | 22 * |
23 */ | 23 */ |
24 #include "precompiled.hpp" | 24 #include "precompiled.hpp" |
25 | |
25 #include "memory/allocation.hpp" | 26 #include "memory/allocation.hpp" |
26 #include "runtime/safepoint.hpp" | 27 #include "runtime/safepoint.hpp" |
27 #include "runtime/thread.inline.hpp" | 28 #include "runtime/thread.inline.hpp" |
28 #include "services/memBaseline.hpp" | 29 #include "services/memBaseline.hpp" |
29 #include "services/memTracker.hpp" | 30 #include "services/memTracker.hpp" |
30 | 31 |
31 | 32 /* |
32 MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = { | 33 * Sizes are sorted in descending order for reporting |
33 {mtJavaHeap, "Java Heap"}, | 34 */ |
34 {mtClass, "Class"}, | 35 int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) { |
35 {mtThreadStack,"Thread Stack"}, | 36 if (s1.size() == s2.size()) { |
36 {mtThread, "Thread"}, | 37 return 0; |
37 {mtCode, "Code"}, | 38 } else if (s1.size() > s2.size()) { |
38 {mtGC, "GC"}, | 39 return -1; |
39 {mtCompiler, "Compiler"}, | 40 } else { |
40 {mtInternal, "Internal"}, | 41 return 1; |
41 {mtOther, "Other"}, | 42 } |
42 {mtSymbol, "Symbol"}, | 43 } |
43 {mtNMT, "Memory Tracking"}, | 44 |
44 {mtTracing, "Tracing"}, | 45 |
45 {mtChunk, "Pooled Free Chunks"}, | 46 int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1, |
46 {mtClassShared,"Shared spaces for classes"}, | 47 const VirtualMemoryAllocationSite& s2) { |
47 {mtTest, "Test"}, | 48 if (s1.reserved() == s2.reserved()) { |
48 {mtNone, "Unknown"} // It can happen when type tagging records are lagging | 49 return 0; |
49 // behind | 50 } else if (s1.reserved() > s2.reserved()) { |
51 return -1; | |
52 } else { | |
53 return 1; | |
54 } | |
55 } | |
56 | |
57 // Sort into allocation site address order for baseline comparison |
58 int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) { | |
59 return s1.call_stack()->compare(*s2.call_stack()); | |
60 } | |
61 | |
62 | |
63 int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1, | |
64 const VirtualMemoryAllocationSite& s2) { | |
65 return s1.call_stack()->compare(*s2.call_stack()); | |
66 } | |
67 | |
68 /* | |
69 * Walker to walk malloc allocation site table | |
70 */ | |
71 class MallocAllocationSiteWalker : public MallocSiteWalker { | |
72 private: | |
73 SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites; | |
74 size_t _count; | |
75 | |
76 // Entries in MallocSiteTable with size = 0 and count = 0, | |
77 // when the malloc site is no longer there. |
78 public: | |
79 MallocAllocationSiteWalker() : _count(0) { } | |
80 | |
81 inline size_t count() const { return _count; } | |
82 | |
83 LinkedList<MallocSite>* malloc_sites() { | |
84 return &_malloc_sites; | |
85 } | |
86 | |
87 bool do_malloc_site(const MallocSite* site) { | |
88 if (site->size() >= MemBaseline::SIZE_THRESHOLD) { | |
89 if (_malloc_sites.add(*site) != NULL) { | |
90 _count++; | |
91 return true; | |
92 } else { | |
93 return false; // OOM | |
94 } | |
95 } else { | |
96 // malloc site does not meet threshold, ignore and continue | |
97 return true; | |
98 } | |
99 } | |
50 }; | 100 }; |
51 | 101 |
52 MemBaseline::MemBaseline() { | 102 // Compare virtual memory region's base address |
53 _baselined = false; | 103 int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) { |
54 | 104 return r1.compare(r2); |
55 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { | 105 } |
56 _malloc_data[index].set_type(MemType2NameMap[index]._flag); | 106 |
57 _vm_data[index].set_type(MemType2NameMap[index]._flag); | 107 // Walk all virtual memory regions for baselining |
58 _arena_data[index].set_type(MemType2NameMap[index]._flag); | 108 class VirtualMemoryAllocationWalker : public VirtualMemoryWalker { |
59 } | 109 private: |
60 | 110 SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base> |
61 _malloc_cs = NULL; | 111 _virtual_memory_regions; |
62 _vm_cs = NULL; | 112 size_t _count; |
63 _vm_map = NULL; | 113 |
64 | 114 public: |
65 _number_of_classes = 0; | 115 VirtualMemoryAllocationWalker() : _count(0) { } |
66 _number_of_threads = 0; | 116 |
67 } | 117 bool do_allocation_site(const ReservedMemoryRegion* rgn) { |
68 | 118 if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) { |
69 | 119 if (_virtual_memory_regions.add(*rgn) != NULL) { |
70 void MemBaseline::clear() { | 120 _count ++; |
71 if (_malloc_cs != NULL) { | 121 return true; |
72 delete _malloc_cs; | 122 } else { |
73 _malloc_cs = NULL; | 123 return false; |
74 } | |
75 | |
76 if (_vm_cs != NULL) { | |
77 delete _vm_cs; | |
78 _vm_cs = NULL; | |
79 } | |
80 | |
81 if (_vm_map != NULL) { | |
82 delete _vm_map; | |
83 _vm_map = NULL; | |
84 } | |
85 | |
86 reset(); | |
87 } | |
88 | |
89 | |
90 void MemBaseline::reset() { | |
91 _baselined = false; | |
92 _total_vm_reserved = 0; | |
93 _total_vm_committed = 0; | |
94 _total_malloced = 0; | |
95 _number_of_classes = 0; | |
96 | |
97 if (_malloc_cs != NULL) _malloc_cs->clear(); | |
98 if (_vm_cs != NULL) _vm_cs->clear(); | |
99 if (_vm_map != NULL) _vm_map->clear(); | |
100 | |
101 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { | |
102 _malloc_data[index].clear(); | |
103 _vm_data[index].clear(); | |
104 _arena_data[index].clear(); | |
105 } | |
106 } | |
107 | |
108 MemBaseline::~MemBaseline() { | |
109 clear(); | |
110 } | |
111 | |
112 // baseline malloc'd memory records, generate overall summary and summaries by | |
113 // memory types | |
114 bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) { | |
115 MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records); | |
116 MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current(); | |
117 size_t used_arena_size = 0; | |
118 int index; | |
119 while (malloc_ptr != NULL) { | |
120 index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags())); | |
121 size_t size = malloc_ptr->size(); | |
122 if (malloc_ptr->is_arena_memory_record()) { | |
123 // We do have anonymous arenas; they are either used as value objects, |
124 // which are embedded inside other objects, or used as stack objects. | |
125 _arena_data[index].inc(size); | |
126 used_arena_size += size; | |
127 } else { | |
128 _total_malloced += size; | |
129 _malloc_data[index].inc(size); | |
130 if (malloc_ptr->is_arena_record()) { | |
131 // see if an arena memory record is present |
132 MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next(); | |
133 if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) { | |
134 assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr), | |
135 "Arena records do not match"); | |
136 size = next_malloc_ptr->size(); | |
137 _arena_data[index].inc(size); | |
138 used_arena_size += size; | |
139 malloc_itr.next(); | |
140 } | |
141 } | 124 } |
142 } | 125 } |
143 malloc_ptr = (MemPointerRecordEx*)malloc_itr.next(); | 126 return true; |
144 } | 127 } |
145 | 128 |
146 // subtract used arena size to get size of arena chunks in the free list | 129 LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() { |
147 index = flag2index(mtChunk); | 130 return &_virtual_memory_regions; |
148 _malloc_data[index].reduce(used_arena_size); | 131 } |
149 // we really don't know how many chunks are in the free list, so just set to | 131 } |
150 // 0 | 133 |
151 _malloc_data[index].overwrite_counter(0); | 134 |
152 | 135 bool MemBaseline::baseline_summary() { |
136 MallocMemorySummary::snapshot(&_malloc_memory_snapshot); | |
137 VirtualMemorySummary::snapshot(&_virtual_memory_snapshot); | |
153 return true; | 138 return true; |
154 } | 139 } |
155 | 140 |
156 // check if there is a safepoint in progress; if so, block the thread | 141 bool MemBaseline::baseline_allocation_sites() { |
157 // for the safepoint | 142 // Malloc allocation sites |
158 void MemBaseline::check_safepoint(JavaThread* thr) { | 143 MallocAllocationSiteWalker malloc_walker; |
159 if (SafepointSynchronize::is_synchronizing()) { | 144 if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) { |
160 // grab and drop the SR_lock to honor the safepoint protocol | 145 return false; |
161 MutexLocker ml(thr->SR_lock()); | 146 } |
162 } | 147 |
163 } | 148 _malloc_sites.move(malloc_walker.malloc_sites()); |
164 | 149 // The malloc sites are collected in size order |
165 // baseline mmap'd memory records, generate overall summary and summaries by | 150 _malloc_sites_order = by_size; |
166 // memory types | 151 |
167 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) { | 152 // Virtual memory allocation sites |
168 MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records); | 153 VirtualMemoryAllocationWalker virtual_memory_walker; |
169 VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current(); | 154 if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) { |
170 int index; | 155 return false; |
171 while (vm_ptr != NULL) { | 156 } |
172 if (vm_ptr->is_reserved_region()) { | 157 |
173 index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags())); | 158 // Virtual memory allocations are collected in call stack order |
174 // we use the number of thread stacks to count threads | 159 _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations()); |
175 if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) { | 160 |
176 _number_of_threads ++; | 161 if (!aggregate_virtual_memory_allocation_sites()) { |
162 return false; | |
163 } | |
164 // Virtual memory allocation sites are aggregated in call stack order |
165 _virtual_memory_sites_order = by_address; | |
166 | |
167 return true; | |
168 } | |
169 | |
170 bool MemBaseline::baseline(bool summaryOnly) { | |
171 reset(); | |
172 | |
173 _class_count = InstanceKlass::number_of_instance_classes(); | |
174 | |
175 if (!baseline_summary()) { | |
176 return false; | |
177 } | |
178 | |
179 _baseline_type = Summary_baselined; | |
180 | |
181 // baseline details | |
182 if (!summaryOnly && | |
183 MemTracker::tracking_level() == NMT_detail) { | |
184 baseline_allocation_sites(); | |
185 _baseline_type = Detail_baselined; | |
186 } | |
187 | |
188 return true; | |
189 } | |
190 | |
191 int compare_allocation_site(const VirtualMemoryAllocationSite& s1, | |
192 const VirtualMemoryAllocationSite& s2) { | |
193 return s1.call_stack()->compare(*s2.call_stack()); | |
194 } | |
195 | |
196 bool MemBaseline::aggregate_virtual_memory_allocation_sites() { | |
197 SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites; | |
198 | |
199 VirtualMemoryAllocationIterator itr = virtual_memory_allocations(); | |
200 const ReservedMemoryRegion* rgn; | |
201 VirtualMemoryAllocationSite* site; | |
202 while ((rgn = itr.next()) != NULL) { | |
203 VirtualMemoryAllocationSite tmp(*rgn->call_stack()); | |
204 site = allocation_sites.find(tmp); | |
205 if (site == NULL) { | |
206 LinkedListNode<VirtualMemoryAllocationSite>* node = | |
207 allocation_sites.add(tmp); | |
208 if (node == NULL) return false; | |
209 site = node->data(); | |
177 } | 210 } |
178 _total_vm_reserved += vm_ptr->size(); | 211 site->reserve_memory(rgn->size()); |
179 _vm_data[index].inc(vm_ptr->size(), 0); | 212 site->commit_memory(rgn->committed_size()); |
180 } else { | 213 } |
181 _total_vm_committed += vm_ptr->size(); | 214 |
182 _vm_data[index].inc(0, vm_ptr->size()); | 215 _virtual_memory_sites.move(&allocation_sites); |
183 } | |
184 vm_ptr = (VMMemRegion*)vm_itr.next(); | |
185 } | |
186 return true; | 216 return true; |
187 } | 217 } |
188 | 218 |
189 // baseline malloc'd memory by callsites, but only the callsites with memory allocation | 219 MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) { |
190 // over 1KB are stored. | 220 assert(!_malloc_sites.is_empty(), "Not detail baseline"); |
191 bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) { | 221 switch(order) { |
192 assert(MemTracker::track_callsite(), "detail tracking is off"); | 222 case by_size: |
193 | 223 malloc_sites_to_size_order(); |
194 MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records)); | 224 break; |
195 MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current(); | 225 case by_site: |
196 MallocCallsitePointer malloc_callsite; | 226 malloc_sites_to_allocation_site_order(); |
197 | 227 break; |
198 // initialize malloc callsite array | 227 break; |
199 if (_malloc_cs == NULL) { | 229 default: |
200 _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64); | 230 ShouldNotReachHere(); |
201 // out of native memory | 231 } |
202 if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) { | 232 return MallocSiteIterator(_malloc_sites.head()); |
203 return false; | 233 } |
204 } | 234 |
205 } else { | 235 VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) { |
206 _malloc_cs->clear(); | 236 assert(!_virtual_memory_sites.is_empty(), "Not detail baseline"); |
207 } | 237 switch(order) { |
208 | 238 case by_size: |
209 MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records); | 239 virtual_memory_sites_to_size_order(); |
210 | 240 break; |
211 // sort into callsite pc order. Details are aggregated by callsites | 241 case by_site: |
212 malloc_data->sort((FN_SORT)malloc_sort_by_pc); | 242 virtual_memory_sites_to_reservation_site_order(); |
213 bool ret = true; | 243 break; |
214 | 244 case by_address: |
215 // baseline memory that totals over 1 KB | 245 default: |
216 while (malloc_ptr != NULL) { | 246 ShouldNotReachHere(); |
217 if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) { | 247 } |
218 // skip thread stacks | 248 return VirtualMemorySiteIterator(_virtual_memory_sites.head()); |
219 if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) { | 249 } |
220 if (malloc_callsite.addr() != malloc_ptr->pc()) { | 250 |
221 if ((malloc_callsite.amount()/K) > 0) { | 251 |
222 if (!_malloc_cs->append(&malloc_callsite)) { | 252 // Sorting allocations sites in different orders |
223 ret = false; | 253 void MemBaseline::malloc_sites_to_size_order() { |
224 break; | 254 if (_malloc_sites_order != by_size) { |
225 } | 255 SortedLinkedList<MallocSite, compare_malloc_size> tmp; |
226 } | 256 |
227 malloc_callsite = MallocCallsitePointer(malloc_ptr->pc()); | 257 // Add malloc sites to sorted linked list to sort into size order |
228 } | 258 tmp.move(&_malloc_sites); |
229 malloc_callsite.inc(malloc_ptr->size()); | 259 _malloc_sites.set_head(tmp.head()); |
230 } | 260 tmp.set_head(NULL); |
231 } | 261 _malloc_sites_order = by_size; |
232 malloc_ptr = (MemPointerRecordEx*)malloc_itr.next(); | 262 } |
233 } | 263 } |
234 | 264 |
235 // restore to address order. Snapshot malloc data is maintained in memory | 265 void MemBaseline::malloc_sites_to_allocation_site_order() { |
236 // address order. | 266 if (_malloc_sites_order != by_site) { |
237 malloc_data->sort((FN_SORT)malloc_sort_by_addr); | 267 SortedLinkedList<MallocSite, compare_malloc_site> tmp; |
238 | 268 // Add malloc sites to sorted linked list to sort into site (address) order |
239 if (!ret) { | 269 tmp.move(&_malloc_sites); |
240 return false; | 270 _malloc_sites.set_head(tmp.head()); |
241 } | 271 tmp.set_head(NULL); |
242 // deal with last record | 272 _malloc_sites_order = by_site; |
243 if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) { | 273 } |
244 if (!_malloc_cs->append(&malloc_callsite)) { | 274 } |
245 return false; | 275 |
246 } | 276 void MemBaseline::virtual_memory_sites_to_size_order() { |
247 } | 277 if (_virtual_memory_sites_order != by_size) { |
248 return true; | 278 SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp; |
249 } | 279 |
250 | 280 tmp.move(&_virtual_memory_sites); |
251 // baseline mmap'd memory by callsites | 281 |
252 bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) { | 282 _virtual_memory_sites.set_head(tmp.head()); |
253 assert(MemTracker::track_callsite(), "detail tracking is off"); | 283 tmp.set_head(NULL); |
254 | 284 _virtual_memory_sites_order = by_size; |
255 VMCallsitePointer vm_callsite; | 285 } |
256 VMCallsitePointer* cur_callsite = NULL; | 286 } |
257 MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records); | 287 |
258 VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current(); | 288 void MemBaseline::virtual_memory_sites_to_reservation_site_order() { |
259 | 289 if (_virtual_memory_sites_order != by_site) { |
260 // initialize virtual memory map array | 290 SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp; |
261 if (_vm_map == NULL) { | 291 |
262 _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length()); | 292 tmp.move(&_virtual_memory_sites); |
263 if (_vm_map == NULL || _vm_map->out_of_memory()) { | 293 |
264 return false; | 294 _virtual_memory_sites.set_head(tmp.head()); |
265 } | 295 tmp.set_head(NULL); |
266 } else { | 296 |
267 _vm_map->clear(); | 297 _virtual_memory_sites_order = by_size; |
268 } | 297 _virtual_memory_sites_order = by_site; |
269 | 299 } |
270 // initialize virtual memory callsite array | 300 |
271 if (_vm_cs == NULL) { | |
272 _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64); | |
273 if (_vm_cs == NULL || _vm_cs->out_of_memory()) { | |
274 return false; | |
275 } | |
276 } else { | |
277 _vm_cs->clear(); | |
278 } | |
279 | |
280 // consolidate virtual memory data | |
281 VMMemRegionEx* reserved_rec = NULL; | |
282 VMMemRegionEx* committed_rec = NULL; | |
283 | |
284 // vm_ptr is coming in increasing base address order | |
285 while (vm_ptr != NULL) { | |
286 if (vm_ptr->is_reserved_region()) { | |
287 // consolidate reserved memory regions for virtual memory map. | |
288 // The criteria for consolidation are: |
289 // 1. two adjacent reserved memory regions | |
290 // 2. belong to the same memory type | |
291 // 3. reserved from the same callsite | |
292 if (reserved_rec == NULL || | |
293 reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() || | |
294 FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) || | |
295 reserved_rec->pc() != vm_ptr->pc()) { | |
296 if (!_vm_map->append(vm_ptr)) { | |
297 return false; | |
298 } | |
299 // inserted reserved region, we need the pointer to the element in virtual | |
300 // memory map array. | |
301 reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1); | |
302 } else { | |
303 reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size()); | |
304 } | |
305 | |
306 if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) { | |
307 return false; | |
308 } | |
309 vm_callsite = VMCallsitePointer(vm_ptr->pc()); | |
310 cur_callsite = &vm_callsite; | |
311 vm_callsite.inc(vm_ptr->size(), 0); | |
312 } else { | |
313 // consolidate committed memory regions for virtual memory map | |
314 // The criteria are: | 242 virtual_memory_sites_to_reservation_site_order(); |
315 // 1. two adjacent committed memory regions | |
316 // 2. committed from the same callsite | |
317 if (committed_rec == NULL || | |
318 committed_rec->base() + committed_rec->size() != vm_ptr->addr() || | |
319 committed_rec->pc() != vm_ptr->pc()) { | |
320 if (!_vm_map->append(vm_ptr)) { | |
321 return false; | |
322 } | |
323 committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1); | |
324 } else { | |
325 committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size()); | |
326 } | |
327 vm_callsite.inc(0, vm_ptr->size()); | |
328 } | |
329 vm_ptr = (VMMemRegionEx*)vm_itr.next(); | |
330 } | |
331 // deal with last record | |
332 if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) { | |
333 return false; | |
334 } | |
335 | |
336 // sort it into callsite pc order. Details are aggregated by callsites | |
337 _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc); | |
338 | |
339 // walk the array to consolidate records by pc |
340 MemPointerArrayIteratorImpl itr(_vm_cs); | |
341 VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current(); | |
342 VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next(); | |
343 while (next_rec != NULL) { | |
344 assert(callsite_rec != NULL, "Sanity check"); | |
345 if (next_rec->addr() == callsite_rec->addr()) { | |
346 callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount()); | |
347 itr.remove(); | |
348 next_rec = (VMCallsitePointer*)itr.current(); | |
349 } else { | |
350 callsite_rec = next_rec; | |
351 next_rec = (VMCallsitePointer*)itr.next(); | |
352 } | |
353 } | |
354 | |
355 return true; | |
356 } | |
357 | |
358 // baseline a snapshot. If summary_only = false, memory usages aggregated by | |
359 // callsites are also baselined. | |
360 // The method call can be lengthy, especially when detail tracking info is | |
361 // requested. So the method checks for safepoint explicitly. | |
362 bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) { | |
363 Thread* THREAD = Thread::current(); | |
364 assert(THREAD->is_Java_thread(), "must be a JavaThread"); | |
365 MutexLocker snapshot_locker(snapshot._lock); | |
366 reset(); | |
367 _baselined = baseline_malloc_summary(snapshot._alloc_ptrs); | |
368 if (_baselined) { | |
369 check_safepoint((JavaThread*)THREAD); | |
370 _baselined = baseline_vm_summary(snapshot._vm_ptrs); | |
371 } | |
372 _number_of_classes = snapshot.number_of_classes(); | |
373 | |
374 if (!summary_only && MemTracker::track_callsite() && _baselined) { | |
375 check_safepoint((JavaThread*)THREAD); | |
376 _baselined = baseline_malloc_details(snapshot._alloc_ptrs); | |
377 if (_baselined) { | |
378 check_safepoint((JavaThread*)THREAD); | |
379 _baselined = baseline_vm_details(snapshot._vm_ptrs); | |
380 } | |
381 } | |
382 return _baselined; | |
383 } | |
384 | |
385 | |
386 int MemBaseline::flag2index(MEMFLAGS flag) const { | |
387 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { | |
388 if (MemType2NameMap[index]._flag == flag) { | |
389 return index; | |
390 } | |
391 } | |
392 assert(false, "no type"); | |
393 return -1; | |
394 } | |
395 | |
396 const char* MemBaseline::type2name(MEMFLAGS type) { | |
397 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { | |
398 if (MemType2NameMap[index]._flag == type) { | |
399 return MemType2NameMap[index]._name; | |
400 } | |
401 } | |
402 assert(false, err_msg("bad type %x", type)); | |
403 return NULL; | |
404 } | |
405 | |
406 | |
407 MemBaseline& MemBaseline::operator=(const MemBaseline& other) { | |
408 _total_malloced = other._total_malloced; | |
409 _total_vm_reserved = other._total_vm_reserved; | |
410 _total_vm_committed = other._total_vm_committed; | |
411 | |
412 _baselined = other._baselined; | |
413 _number_of_classes = other._number_of_classes; | |
414 | |
415 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { | |
416 _malloc_data[index] = other._malloc_data[index]; | |
417 _vm_data[index] = other._vm_data[index]; | |
418 _arena_data[index] = other._arena_data[index]; | |
419 } | |
420 | |
421 if (MemTracker::track_callsite()) { | |
422 assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory"); | |
423 assert(other._malloc_cs != NULL && other._vm_cs != NULL, | |
424 "not properly baselined"); | |
425 _malloc_cs->clear(); | |
426 _vm_cs->clear(); | |
427 int index; | |
428 for (index = 0; index < other._malloc_cs->length(); index ++) { | |
429 _malloc_cs->append(other._malloc_cs->at(index)); | |
430 } | |
431 | |
432 for (index = 0; index < other._vm_cs->length(); index ++) { | |
433 _vm_cs->append(other._vm_cs->at(index)); | |
434 } | |
435 } | |
436 return *this; | |
437 } | |
438 | |
439 /* compare functions for sorting */ | |
440 | |
441 // sort snapshot malloc'd records in callsite pc order | |
442 int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) { | |
443 assert(MemTracker::track_callsite(),"Just check"); | |
444 const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1; | |
445 const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2; | |
446 return UNSIGNED_COMPARE(mp1->pc(), mp2->pc()); | |
447 } | |
448 | |
449 // sort baselined malloc'd records in size order | |
450 int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) { | |
451 assert(MemTracker::is_on(), "Just check"); | |
452 const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1; | |
453 const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2; | |
454 return UNSIGNED_COMPARE(mp2->amount(), mp1->amount()); | |
455 } | |
456 | |
457 // sort baselined malloc'd records in callsite pc order | |
458 int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) { | |
459 assert(MemTracker::is_on(), "Just check"); | |
460 const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1; | |
461 const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2; | |
462 return UNSIGNED_COMPARE(mp1->addr(), mp2->addr()); | |
463 } | |
464 | |
465 | |
466 // sort baselined mmap'd records in size (reserved size) order | |
467 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) { | |
468 assert(MemTracker::is_on(), "Just check"); | |
469 const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1; | |
470 const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2; | |
471 return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount()); | |
472 } | |
473 | |
474 // sort baselined mmap'd records in callsite pc order | |
475 int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) { | |
476 assert(MemTracker::is_on(), "Just check"); | |
477 const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1; | |
478 const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2; | |
479 return UNSIGNED_COMPARE(mp1->addr(), mp2->addr()); | |
480 } | |
481 | |
482 | |
483 // sort snapshot malloc'd records in memory block address order | |
484 int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) { | |
485 assert(MemTracker::is_on(), "Just check"); | |
486 const MemPointerRecord* mp1 = (const MemPointerRecord*)p1; | |
487 const MemPointerRecord* mp2 = (const MemPointerRecord*)p2; | |
488 int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr()); | |
489 assert(p1 == p2 || delta != 0, "dup pointer"); | |
490 return delta; | |
491 } | |
492 |
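
For readers skimming the new code in the right-hand column: the rewritten NMT baseline is built around one recurring pattern, a three-way comparator function driving a sorted linked list that the site walkers insert into. Below is a minimal, self-contained sketch of that pattern, not HotSpot code: Site, SimpleSortedList, and the bool-returning add() are hypothetical stand-ins for MallocSite and HotSpot's SortedLinkedList, whose add() actually returns a node pointer (NULL on out-of-memory).

#include <cstddef>
#include <cstdio>
#include <new>

// Hypothetical stand-in for MallocSite: just a size, no call stack.
struct Site {
  size_t size;
};

// Same contract as compare_malloc_size in the diff: a negative result
// sorts s1 before s2, so larger sizes come first (descending order).
int compare_site_size(const Site& s1, const Site& s2) {
  if (s1.size == s2.size) return 0;
  return (s1.size > s2.size) ? -1 : 1;
}

// Minimal sorted singly-linked list that keeps elements in comparator
// order on insertion, standing in for SortedLinkedList<E, compare>.
template <typename E, int (*compare)(const E&, const E&)>
class SimpleSortedList {
 public:
  struct Node { E data; Node* next; };

  SimpleSortedList() : _head(NULL) { }
  ~SimpleSortedList() {
    while (_head != NULL) { Node* n = _head; _head = n->next; delete n; }
  }

  // Returns false on allocation failure, mirroring the walkers' OOM path.
  bool add(const E& e) {
    Node* n = new (std::nothrow) Node;
    if (n == NULL) return false;
    n->data = e;
    // Walk to the first element that sorts after e, insert before it.
    Node** cur = &_head;
    while (*cur != NULL && compare((*cur)->data, e) <= 0) {
      cur = &(*cur)->next;
    }
    n->next = *cur;
    *cur = n;
    return true;
  }

  const Node* head() const { return _head; }

 private:
  Node* _head;
};

int main() {
  typedef SimpleSortedList<Site, compare_site_size> SiteList;
  SiteList sites;

  size_t sizes[] = { 512, 4096, 1024 };
  for (int i = 0; i < 3; i++) {
    Site s = { sizes[i] };
    if (!sites.add(s)) return 1;  // abort on OOM, like do_malloc_site
  }

  // Prints 4096, 1024, 512: descending size order, ready for a report.
  for (const SiteList::Node* n = sites.head(); n != NULL; n = n->next) {
    printf("%zu\n", n->data.size);
  }
  return 0;
}

The walkers in the diff (MallocAllocationSiteWalker, VirtualMemoryAllocationWalker) follow the same shape per allocation site, additionally skipping entries below MemBaseline::SIZE_THRESHOLD and returning false on OOM so the table walk can be aborted.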