comparison src/share/vm/services/memSnapshot.hpp @ 6197:d2a62e0f25eb

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author zgu
date Thu, 28 Jun 2012 17:03:16 -0400
parents
children 4acebbe310e1
1 /*
2 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
26 #define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
27
28 #include "memory/allocation.hpp"
29 #include "runtime/mutex.hpp"
30 #include "runtime/mutexLocker.hpp"
31 #include "services/memBaseline.hpp"
32 #include "services/memPtrArray.hpp"
33
34
35 // Snapshot pointer array iterator
36
37 // The pointer array contains malloc-ed pointers
38 class MemPointerIterator : public MemPointerArrayIteratorImpl {
39 public:
40 MemPointerIterator(MemPointerArray* arr):
41 MemPointerArrayIteratorImpl(arr) {
42 assert(arr != NULL, "null array");
43 }
44
45 #ifdef ASSERT
46 virtual bool is_dup_pointer(const MemPointer* ptr1,
47 const MemPointer* ptr2) const {
48 MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
49 MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
50
51 if (p1->addr() != p2->addr()) return false;
52 if ((p1->flags() & MemPointerRecord::tag_masks) !=
53 (p2->flags() & MemPointerRecord::tag_masks)) {
54 return false;
55 }
56 // we do see multiple commit/uncommit operations on the same memory region; that is ok
57 return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
58 (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
59 }
60
61 virtual bool insert(MemPointer* ptr) {
62 if (_pos > 0) {
63 MemPointer* p1 = (MemPointer*)ptr;
64 MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
65 assert(!is_dup_pointer(p1, p2),
66 "dup pointer");
67 }
68 if (_pos < _array->length() - 1) {
69 MemPointer* p1 = (MemPointer*)ptr;
70 MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
71 assert(!is_dup_pointer(p1, p2),
72 "dup pointer");
73 }
74 return _array->insert_at(ptr, _pos);
75 }
76
77 virtual bool insert_after(MemPointer* ptr) {
78 if (_pos > 0) {
79 MemPointer* p1 = (MemPointer*)ptr;
80 MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
81 assert(!is_dup_pointer(p1, p2),
82 "dup pointer");
83 }
84 if (_pos < _array->length() - 1) {
85 MemPointer* p1 = (MemPointer*)ptr;
86 MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
87
88 assert(!is_dup_pointer(p1, p2),
89 "dup pointer");
90 }
91 if (_array->insert_at(ptr, _pos + 1)) {
92 _pos ++;
93 return true;
94 }
95 return false;
96 }
97 #endif
98
99 virtual MemPointer* locate(address addr) {
100 MemPointer* cur = current();
101 while (cur != NULL && cur->addr() < addr) {
102 cur = next();
103 }
104 return cur;
105 }
106 };
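
The iterator above is a cursor over an address-sorted array: locate(addr) advances to the first record whose address is at or past addr, and insert()/insert_after() (with their debug-only duplicate checks) keep the array ordered as new malloc records arrive. A minimal, hypothetical sketch of that calling pattern follows; it assumes the NMT-internal MemPointerArray/MemPointerRecord types declared in memPtrArray.hpp and is not part of this changeset:

// Hypothetical helper (not in this changeset): fold a new malloc record
// into an address-sorted pointer array.
static bool insert_sorted(MemPointerArray* arr, MemPointerRecord* rec) {
  MemPointerIterator itr(arr);
  // Position the cursor at the first record with addr() >= rec->addr();
  // NULL means the new record belongs at the end of the array.
  MemPointer* cur = itr.locate(rec->addr());
  if (cur == NULL) {
    return arr->append(rec);   // append past the last element
  } else {
    return itr.insert(rec);    // insert before the located record
  }
}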
107
108 class VMMemPointerIterator : public MemPointerIterator {
109 public:
110 VMMemPointerIterator(MemPointerArray* arr):
111 MemPointerIterator(arr) {
112 }
113
114 // locate an existing record that contains the specified address, or
115 // the position where a record with the specified address should
116 // be inserted
117 virtual MemPointer* locate(address addr) {
118 VMMemRegion* cur = (VMMemRegion*)current();
119 VMMemRegion* next_p;
120
121 while (cur != NULL) {
122 if (cur->base() > addr) {
123 return cur;
124 } else {
125 // find nearest existing range that has base address <= addr
126 next_p = (VMMemRegion*)peek_next();
127 if (next_p != NULL && next_p->base() <= addr) {
128 cur = (VMMemRegion*)next();
129 continue;
130 }
131 }
132
133 if (cur->is_reserve_record() &&
134 cur->base() <= addr &&
135 (cur->base() + cur->size() > addr)) {
136 return cur;
137 } else if (cur->is_commit_record() &&
138 cur->base() <= addr &&
139 (cur->base() + cur->committed_size() > addr)) {
140 return cur;
141 }
142 cur = (VMMemRegion*)next();
143 }
144 return NULL;
145 }
146
147 #ifdef ASSERT
148 virtual bool is_dup_pointer(const MemPointer* ptr1,
149 const MemPointer* ptr2) const {
150 VMMemRegion* p1 = (VMMemRegion*)ptr1;
151 VMMemRegion* p2 = (VMMemRegion*)ptr2;
152
153 if (p1->addr() != p2->addr()) return false;
154 if ((p1->flags() & MemPointerRecord::tag_masks) !=
155 (p2->flags() & MemPointerRecord::tag_masks)) {
156 return false;
157 }
158 // we do see multiple commit/uncommit operations on the same memory region; that is ok
159 return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
160 (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
161 }
162 #endif
163 };
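
Unlike the malloc iterator, this locate() has containment semantics: it returns a reserve or commit record whose range covers addr, or else the record before which a region at addr would be inserted. A hypothetical containment query built on it (the helper name is illustrative only, not from this changeset):

// Hypothetical query: does 'addr' fall inside a reserved region tracked by 'arr'?
static bool is_in_reserved_region(MemPointerArray* arr, address addr) {
  VMMemPointerIterator itr(arr);
  VMMemRegion* region = (VMMemRegion*)itr.locate(addr);
  // locate() may also return the insertion point after 'addr',
  // so the containment check has to be repeated explicitly.
  return region != NULL && region->is_reserve_record() &&
         region->base() <= addr && addr < region->base() + region->size();
}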
164
165 class StagingWalker : public MemPointerArrayIterator {
166 private:
167 MemPointerArrayIteratorImpl _itr;
168 bool _is_vm_record;
169 bool _end_of_array;
170 VMMemRegionEx _vm_record;
171 MemPointerRecordEx _malloc_record;
172
173 public:
174 StagingWalker(MemPointerArray* arr): _itr(arr) {
175 _end_of_array = false;
176 next();
177 }
178
179 // return the pointer at current position
180 MemPointer* current() const {
181 if (_end_of_array) {
182 return NULL;
183 }
184 if (is_vm_record()) {
185 return (MemPointer*)&_vm_record;
186 } else {
187 return (MemPointer*)&_malloc_record;
188 }
189 }
190
191 // return the next pointer and advance current position
192 MemPointer* next();
193
194 // type of 'current' record
195 bool is_vm_record() const {
196 return _is_vm_record;
197 }
198
199 // return the next pointer without advancing current position
200 MemPointer* peek_next() const {
201 assert(false, "not supported");
202 return NULL;
203 }
204
205 MemPointer* peek_prev() const {
206 assert(false, "not supported");
207 return NULL;
208 }
209 // remove the pointer at current position
210 void remove() {
211 assert(false, "not supported");
212 }
213
214 // insert the pointer at current position
215 bool insert(MemPointer* ptr) {
216 assert(false, "not supported");
217 return false;
218 }
219
220 bool insert_after(MemPointer* ptr) {
221 assert(false, "not supported");
222 return false;
223 }
224
225 private:
226 // consolidate all records referring to this vm region
227 bool consolidate_vm_records(VMMemRegionEx* vm_rec);
228 };
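
The walker flattens the staging area into a single stream: next() yields malloc records one by one, but when it reaches virtual-memory records it consolidates the run of records describing one region into a single VMMemRegionEx (see consolidate_vm_records). A rough sketch of a consumer draining that stream; the dispatch targets are placeholders, not code from this change:

// Hypothetical consumer: split staged records into malloc and virtual-memory streams.
static void drain_staging(MemPointerArray* staging) {
  StagingWalker walker(staging);
  // current() returns NULL once the underlying array is exhausted.
  for (MemPointer* p = walker.current(); p != NULL; p = walker.next()) {
    if (walker.is_vm_record()) {
      VMMemRegionEx* vm_rec = (VMMemRegionEx*)p;
      // ... fold vm_rec into the virtual memory pointer array ...
    } else {
      MemPointerRecordEx* malloc_rec = (MemPointerRecordEx*)p;
      // ... fold malloc_rec into the malloc pointer array ...
    }
  }
}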
229
230 class MemBaseline;
231
232 class MemSnapshot : public CHeapObj<mtNMT> {
233 private:
234 // the following two arrays contain records of all known live memory blocks
235 // live malloc-ed memory pointers
236 MemPointerArray* _alloc_ptrs;
237 // live virtual memory pointers
238 MemPointerArray* _vm_ptrs;
239
240 // staging area for a generation's data, before
241 // it can be promoted to the snapshot
242 MemPointerArray* _staging_area;
243
244 // the lock to protect this snapshot
245 Monitor* _lock;
246
247 NOT_PRODUCT(size_t _untracked_count;)
248 friend class MemBaseline;
249
250 public:
251 MemSnapshot();
252 virtual ~MemSnapshot();
253
254 // if we are running out of native memory
255 bool out_of_memory() const {
256 return (_alloc_ptrs == NULL || _staging_area == NULL ||
257 _vm_ptrs == NULL || _lock == NULL ||
258 _alloc_ptrs->out_of_memory() ||
259 _staging_area->out_of_memory() ||
260 _vm_ptrs->out_of_memory());
261 }
262
263 // merge a per-thread memory recorder into staging area
264 bool merge(MemRecorder* rec);
265 // promote staged data to snapshot
266 void promote();
267
268
269 void wait(long timeout) {
270 assert(_lock != NULL, "Just check");
271 MonitorLockerEx locker(_lock);
272 locker.wait(true, timeout);
273 }
274
275 NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
276 NOT_PRODUCT(void check_staging_data();)
277 NOT_PRODUCT(void check_malloc_pointers();)
278 NOT_PRODUCT(bool has_allocation_record(address addr);)
279
280 private:
281 // copy pointer data from src to dest
282 void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
283 };
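
MemSnapshot is the rendezvous point of the tracking pipeline: per-thread MemRecorder contents are merged into _staging_area under _lock, and promote() later folds the staged generation into the live _alloc_ptrs and _vm_ptrs arrays. A hypothetical driver showing the intended merge-then-promote flow (the function and its caller are illustrative, not part of this changeset):

// Hypothetical driver illustrating the merge -> promote lifecycle of a snapshot.
static void process_recorder(MemSnapshot* snapshot, MemRecorder* recorder) {
  // Bail out if any of the backing arrays failed to allocate.
  if (snapshot->out_of_memory()) {
    return;
  }
  // Stage the per-thread records into the snapshot's staging area.
  if (snapshot->merge(recorder)) {
    // Fold the staged generation into the live malloc/vm pointer arrays.
    snapshot->promote();
  }
}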
284
285
286 #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP