/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

|
|
/*
 * This counter class counts memory allocation and deallocation,
 * records total memory allocation size and number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;   // number of live allocations
  size_t   _size;    // total bytes currently allocated

  // High-water marks, maintained in debug builds only.
  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size = 0;)
  }

  // Reset counters
  // NOTE(review): these are plain (non-atomic) stores, unlike the updates
  // below -- presumably callers guarantee no concurrent updates while
  // resetting; confirm at the call sites.
  void reset() {
    _size = 0;
    _count = 0;
    DEBUG_ONLY(_peak_size = 0;)
    DEBUG_ONLY(_peak_count = 0;)
  }

  // Record one allocation of sz bytes. The count is always bumped; the size
  // is only updated for non-zero sizes (arena tracking passes sz == 0 to
  // count arena objects whose backing size is tracked via resize()).
  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  // Record one deallocation of sz bytes. Debug-only asserts catch
  // count/size underflow before the atomic decrement.
  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  // Adjust total size by a signed delta without changing the allocation
  // count (used when an existing arena grows or shrinks).
  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};
|
|
93
|
|
94 /*
|
|
95 * Malloc memory used by a particular subsystem.
|
|
96 * It includes the memory acquired through os::malloc()
|
|
97 * call and arena's backing memory.
|
|
98 */
|
|
99 class MallocMemory VALUE_OBJ_CLASS_SPEC {
|
|
100 private:
|
|
101 MemoryCounter _malloc;
|
|
102 MemoryCounter _arena;
|
|
103
|
|
104 public:
|
|
105 MallocMemory() { }
|
|
106
|
|
107 inline void record_malloc(size_t sz) {
|
|
108 _malloc.allocate(sz);
|
|
109 }
|
|
110
|
|
111 inline void record_free(size_t sz) {
|
|
112 _malloc.deallocate(sz);
|
|
113 }
|
|
114
|
|
115 inline void record_new_arena() {
|
|
116 _arena.allocate(0);
|
|
117 }
|
|
118
|
|
119 inline void record_arena_free() {
|
|
120 _arena.deallocate(0);
|
|
121 }
|
|
122
|
|
123 inline void record_arena_size_change(long sz) {
|
|
124 _arena.resize(sz);
|
|
125 }
|
|
126
|
|
127 void reset() {
|
|
128 _malloc.reset();
|
|
129 _arena.reset();
|
|
130 }
|
|
131
|
|
132 inline size_t malloc_size() const { return _malloc.size(); }
|
|
133 inline size_t malloc_count() const { return _malloc.count();}
|
|
134 inline size_t arena_size() const { return _arena.size(); }
|
|
135 inline size_t arena_count() const { return _arena.count(); }
|
|
136
|
|
137 DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
|
|
138 DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
|
|
139 };
|
|
140
|
|
class MallocMemorySummary;

// A snapshot of malloc'd memory, includes malloc memory
// usage by types and memory used by tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory      _malloc[mt_number_of_types];  // per-memory-type malloc/arena counters
  MemoryCounter     _tracking_header;             // memory consumed by the tracking headers themselves


 public:
  // Counters for the given memory type.
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  // Counters at the given flag index (bounds-checked in debug builds).
  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_malloc[index];
  }

  // Counter for memory used by malloc tracking headers.
  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  // Number of mtThreadStack allocations recorded via malloc.
  // The const_cast works around by_type() not being const; the call itself
  // does not mutate the snapshot.
  inline size_t thread_count() const {
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  void reset();

  // Copy all counters into s. NOTE(review): plain member-wise copies, not
  // atomic -- presumably tolerable for snapshot consumers; confirm.
  void copy_to(MallocMemorySnapshot* s) {
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
|
|
192
|
|
193 /*
|
|
194 * This class is for collecting malloc statistics at summary level
|
|
195 */
|
|
196 class MallocMemorySummary : AllStatic {
|
|
197 private:
|
|
198 // Reserve memory for placement of MallocMemorySnapshot object
|
|
199 static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
|
|
200
|
|
201 public:
|
|
202 static void initialize();
|
|
203
|
|
204 static inline void record_malloc(size_t size, MEMFLAGS flag) {
|
|
205 as_snapshot()->by_type(flag)->record_malloc(size);
|
|
206 }
|
|
207
|
|
208 static inline void record_free(size_t size, MEMFLAGS flag) {
|
|
209 as_snapshot()->by_type(flag)->record_free(size);
|
|
210 }
|
|
211
|
|
212 static inline void record_new_arena(MEMFLAGS flag) {
|
|
213 as_snapshot()->by_type(flag)->record_new_arena();
|
|
214 }
|
|
215
|
|
216 static inline void record_arena_free(MEMFLAGS flag) {
|
|
217 as_snapshot()->by_type(flag)->record_arena_free();
|
|
218 }
|
|
219
|
|
220 static inline void record_arena_size_change(long size, MEMFLAGS flag) {
|
|
221 as_snapshot()->by_type(flag)->record_arena_size_change(size);
|
|
222 }
|
|
223
|
|
224 static void snapshot(MallocMemorySnapshot* s) {
|
|
225 as_snapshot()->copy_to(s);
|
|
226 s->make_adjustment();
|
|
227 }
|
|
228
|
|
229 // Record memory used by malloc tracking header
|
|
230 static inline void record_new_malloc_header(size_t sz) {
|
|
231 as_snapshot()->malloc_overhead()->allocate(sz);
|
|
232 }
|
|
233
|
|
234 static inline void record_free_malloc_header(size_t sz) {
|
|
235 as_snapshot()->malloc_overhead()->deallocate(sz);
|
|
236 }
|
|
237
|
|
238 // The memory used by malloc tracking headers
|
|
239 static inline size_t tracking_overhead() {
|
|
240 return as_snapshot()->malloc_overhead()->size();
|
|
241 }
|
|
242
|
|
243 // Reset all counters to zero
|
|
244 static void reset() {
|
|
245 as_snapshot()->reset();
|
|
246 }
|
|
247
|
|
248 static MallocMemorySnapshot* as_snapshot() {
|
|
249 return (MallocMemorySnapshot*)_snapshot;
|
|
250 }
|
|
251 };
|
|
252
|
|
253
|
|
254 /*
|
|
255 * Malloc tracking header.
|
|
256 * To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
|
|
257 * which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
|
|
258 */
|
|
259
|
|
260 class MallocHeader VALUE_OBJ_CLASS_SPEC {
|
|
261 #ifdef _LP64
|
|
262 size_t _size : 62;
|
|
263 size_t _level : 2;
|
|
264 size_t _flags : 8;
|
|
265 size_t _pos_idx : 16;
|
|
266 size_t _bucket_idx: 40;
|
|
267 #define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
|
|
268 #define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
|
|
269 #define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
|
|
270 #else
|
|
271 size_t _size : 30;
|
|
272 size_t _level : 2;
|
|
273 size_t _flags : 8;
|
|
274 size_t _pos_idx : 8;
|
|
275 size_t _bucket_idx: 16;
|
|
276 #define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
|
|
277 #define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
|
|
278 // Max malloc size = 1GB - 1 on 32 bit system, such has total 4GB memory
|
|
279 #define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
|
|
280 #endif // _LP64
|
|
281
|
|
282 public:
|
|
283 // Summary tracking header
|
|
284 MallocHeader(size_t size, MEMFLAGS flags) {
|
|
285 assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
286 "Wrong header size");
|
|
287
|
|
288 _level = NMT_summary;
|
|
289 _flags = flags;
|
|
290 set_size(size);
|
|
291 MallocMemorySummary::record_malloc(size, flags);
|
|
292 MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
293 }
|
|
294 // Detail tracking header
|
|
295 MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
|
|
296 assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
297 "Wrong header size");
|
|
298
|
|
299 _level = NMT_detail;
|
|
300 _flags = flags;
|
|
301 set_size(size);
|
|
302 size_t bucket_idx;
|
|
303 size_t pos_idx;
|
|
304 if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
|
|
305 assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
|
|
306 assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
|
|
307 _bucket_idx = bucket_idx;
|
|
308 _pos_idx = pos_idx;
|
|
309 }
|
|
310 MallocMemorySummary::record_malloc(size, flags);
|
|
311 MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
312 }
|
|
313 // Minimal tracking header
|
|
314 MallocHeader() {
|
|
315 assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
316 "Wrong header size");
|
|
317
|
|
318 _level = (unsigned short)NMT_minimal;
|
|
319 }
|
|
320
|
|
321 inline NMT_TrackingLevel tracking_level() const {
|
|
322 return (NMT_TrackingLevel)_level;
|
|
323 }
|
|
324
|
|
325 inline size_t size() const { return _size; }
|
|
326 inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
|
|
327 bool get_stack(NativeCallStack& stack) const;
|
|
328
|
|
329 // Cleanup tracking information before the memory is released.
|
|
330 void release() const;
|
|
331
|
|
332 private:
|
|
333 inline void set_size(size_t size) {
|
|
334 assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
|
|
335 _size = size;
|
|
336 }
|
|
337 bool record_malloc_site(const NativeCallStack& stack, size_t size,
|
|
338 size_t* bucket_idx, size_t* pos_idx) const;
|
|
339 };
|
|
340
|
|
341
|
|
342 // Main class called from MemTracker to track malloc activities
|
|
343 class MallocTracker : AllStatic {
|
|
344 public:
|
|
345 // Initialize malloc tracker for specific tracking level
|
|
346 static bool initialize(NMT_TrackingLevel level);
|
|
347
|
|
348 static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
|
|
349
|
|
350 // malloc tracking header size for specific tracking level
|
|
351 static inline size_t malloc_header_size(NMT_TrackingLevel level) {
|
|
352 return (level == NMT_off) ? 0 : sizeof(MallocHeader);
|
|
353 }
|
|
354
|
|
355 // Parameter name convention:
|
|
356 // memblock : the beginning address for user data
|
|
357 // malloc_base: the beginning address that includes malloc tracking header
|
|
358 //
|
|
359 // The relationship:
|
|
360 // memblock = (char*)malloc_base + sizeof(nmt header)
|
|
361 //
|
|
362
|
|
363 // Record malloc on specified memory block
|
|
364 static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
|
|
365 const NativeCallStack& stack, NMT_TrackingLevel level);
|
|
366
|
|
367 // Record free on specified memory block
|
|
368 static void* record_free(void* memblock);
|
|
369
|
|
370 // Get tracking level of specified memory block
|
|
371 static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
|
|
372
|
|
373
|
|
374 // Offset memory address to header address
|
|
375 static inline void* get_base(void* memblock);
|
|
376 static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
|
|
377 if (memblock == NULL || level == NMT_off) return memblock;
|
|
378 return (char*)memblock - malloc_header_size(level);
|
|
379 }
|
|
380
|
|
381 // Get memory size
|
|
382 static inline size_t get_size(void* memblock) {
|
|
383 MallocHeader* header = malloc_header(memblock);
|
|
384 assert(header->tracking_level() >= NMT_summary,
|
|
385 "Wrong tracking level");
|
|
386 return header->size();
|
|
387 }
|
|
388
|
|
389 // Get memory type
|
|
390 static inline MEMFLAGS get_flags(void* memblock) {
|
|
391 MallocHeader* header = malloc_header(memblock);
|
|
392 assert(header->tracking_level() >= NMT_summary,
|
|
393 "Wrong tracking level");
|
|
394 return header->flags();
|
|
395 }
|
|
396
|
|
397 // Get header size
|
|
398 static inline size_t get_header_size(void* memblock) {
|
|
399 return (memblock == NULL) ? 0 : sizeof(MallocHeader);
|
|
400 }
|
|
401
|
|
402 static inline void record_new_arena(MEMFLAGS flags) {
|
|
403 MallocMemorySummary::record_new_arena(flags);
|
|
404 }
|
|
405
|
|
406 static inline void record_arena_free(MEMFLAGS flags) {
|
|
407 MallocMemorySummary::record_arena_free(flags);
|
|
408 }
|
|
409
|
|
410 static inline void record_arena_size_change(int size, MEMFLAGS flags) {
|
|
411 MallocMemorySummary::record_arena_size_change(size, flags);
|
|
412 }
|
|
413 private:
|
|
414 static inline MallocHeader* malloc_header(void *memblock) {
|
|
415 assert(memblock != NULL, "NULL pointer");
|
|
416 MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
|
|
417 assert(header->tracking_level() >= NMT_minimal, "Bad header");
|
|
418 return header;
|
|
419 }
|
|
420 };
|
|
#endif // INCLUDE_NMT


#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|