comparison src/share/vm/services/memSnapshot.hpp @ 6882:716c64bda5ba

7199092: NMT: NMT needs to deal with overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT can now generate a virtual memory map.
Reviewed-by: acorn, coleenp
author zgu
date Fri, 19 Oct 2012 21:40:07 -0400
parents 33143ee07800
children fb3190e77d3c
comparing 6879:8ebcedb7604d with 6882:716c64bda5ba
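
The summary above is the heart of the fix: before this changeset NMT tracked only reserved ranges, so reserve/commit events over overlapping ranges could not be attributed to a region, and no per-region virtual memory map could be reported. A minimal standalone sketch (illustrative only, not HotSpot code; `Region` and `ReservedRegion` are invented names) of the reserved-plus-committed bookkeeping the patch introduces:

// Minimal sketch of the data model this changeset enables: each reserved
// range carries its committed sub-ranges, which is what a "virtual memory
// map" report walks.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Region { size_t base; size_t size; };

struct ReservedRegion {
  Region range;                   // the reserved address range
  std::vector<Region> committed;  // committed sub-ranges, sorted by base
};

int main() {
  // reserve 1 MB, then commit two sub-ranges inside it
  ReservedRegion r{{0x100000, 0x100000}, {}};
  r.committed.push_back({0x100000, 0x2000});
  r.committed.push_back({0x180000, 0x1000});

  printf("reserved  [0x%zx, 0x%zx)\n", r.range.base, r.range.base + r.range.size);
  for (const Region& c : r.committed) {
    printf("  committed [0x%zx, 0x%zx)\n", c.base, c.base + c.size);
  }
  return 0;
}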
--- a/src/share/vm/services/memSnapshot.hpp
+++ b/src/share/vm/services/memSnapshot.hpp
@@ -109,37 +109,45 @@
  public:
   VMMemPointerIterator(MemPointerArray* arr):
     MemPointerIterator(arr) {
   }
 
-  // locate an existing record that contains specified address, or
-  // the record, where the record with specified address, should
-  // be inserted.
-  // virtual memory record array is sorted in address order, so
-  // binary search is performed
+  // locate an existing reserved memory region that contains specified address,
+  // or the reserved region just above this address, where the incoming
+  // reserved region should be inserted.
   virtual MemPointer* locate(address addr) {
-    int index_low = 0;
-    int index_high = _array->length();
-    int index_mid = (index_high + index_low) / 2;
-    int r = 1;
-    while (index_low < index_high && (r = compare(index_mid, addr)) != 0) {
-      if (r > 0) {
-        index_high = index_mid;
-      } else {
-        index_low = index_mid;
-      }
-      index_mid = (index_high + index_low) / 2;
-    }
-    if (r == 0) {
-      // update current location
-      _pos = index_mid;
-      return _array->at(index_mid);
-    } else {
-      return NULL;
-    }
+    reset();
+    VMMemRegion* reg = (VMMemRegion*)current();
+    while (reg != NULL) {
+      if (reg->is_reserved_region()) {
+        if (reg->contains_address(addr) || addr < reg->base()) {
+          return reg;
+        }
+      }
+      reg = (VMMemRegion*)next();
+    }
+    return NULL;
   }
 
+  // following methods update virtual memory in the context
+  // of 'current' position, which is properly positioned by
+  // callers via locate method.
+  bool add_reserved_region(MemPointerRecord* rec);
+  bool add_committed_region(MemPointerRecord* rec);
+  bool remove_uncommitted_region(MemPointerRecord* rec);
+  bool remove_released_region(MemPointerRecord* rec);
+
+  // split a reserved region to create a new memory region with specified base and size
+  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
+ private:
+  bool insert_record(MemPointerRecord* rec);
+  bool insert_record_after(MemPointerRecord* rec);
+
+  bool insert_reserved_region(MemPointerRecord* rec);
+
+  // reset current position
+  inline void reset() { _pos = 0; }
 #ifdef ASSERT
   virtual bool is_dup_pointer(const MemPointer* ptr1,
     const MemPointer* ptr2) const {
     VMMemRegion* p1 = (VMMemRegion*)ptr1;
     VMMemRegion* p2 = (VMMemRegion*)ptr2;
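
Note on the hunk above: `locate` drops the old binary search in favor of a scan from the front of the still address-sorted array, apparently because committed-region records are now interleaved with reserved-region records and the deleted `compare`-by-index (see the next hunk) no longer applies. A self-contained sketch of the same contract (hypothetical `Region` type, not the HotSpot API), returning the region containing `addr`, else the first region above it, else null:

#include <cstddef>
#include <vector>

struct Region {
  size_t base, size;
  bool contains(size_t addr) const { return addr >= base && addr < base + size; }
};

// Same contract as VMMemPointerIterator::locate: regions are sorted by base;
// return the region containing addr, or the first region whose base is above
// addr (the insertion point), or nullptr if addr is past every region.
const Region* locate(const std::vector<Region>& sorted, size_t addr) {
  for (const Region& r : sorted) {
    if (r.contains(addr) || addr < r.base) {
      return &r;
    }
  }
  return nullptr;
}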
@@ -152,36 +160,21 @@
     // we do see multiple commit/uncommit on the same memory, it is ok
     return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
            (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   }
 #endif
-  // compare if an address falls into a memory region,
-  // return 0, if the address falls into a memory region at specified index
-  // return 1, if memory region pointed by specified index is higher than the address
-  // return -1, if memory region pointed by specified index is lower than the address
-  int compare(int index, address addr) const {
-    VMMemRegion* r = (VMMemRegion*)_array->at(index);
-    assert(r->is_reserve_record(), "Sanity check");
-    if (r->addr() > addr) {
-      return 1;
-    } else if (r->addr() + r->reserved_size() <= addr) {
-      return -1;
-    } else {
-      return 0;
-    }
-  }
 };
 
 class MallocRecordIterator : public MemPointerArrayIterator {
- private:
+ protected:
   MemPointerArrayIteratorImpl  _itr;
 
  public:
   MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
 
-  MemPointer* current() const {
+  virtual MemPointer* current() const {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
     if (next == NULL || next->addr() != cur->addr()) {
       return cur;
@@ -192,11 +185,11 @@
       assert(cur->seq() != next->seq(), "Sanity check");
       return cur->seq() > next->seq() ? cur : next;
     }
   }
 
-  MemPointer* next() {
+  virtual MemPointer* next() {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
     MemPointerRecord* next = (MemPointerRecord*)_itr.next();
     if (next == NULL) {
       return NULL;
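
This hunk, together with the `private:` to `protected:` change above, prepares for the `VMRecordIterator` subclass added below: it reuses the inherited `_itr` and overrides `current()`/`next()`, which only take effect through a base-class pointer if the methods are virtual. A toy illustration of that dispatch point (invented class names):

#include <cstdio>

struct BaseIter {
  // virtual is what lets a derived iterator's filtering take effect
  // through a base-class pointer
  virtual int next() { return 1; }
  virtual ~BaseIter() {}
};

struct SkippingIter : BaseIter {
  int next() override { return 2; }  // e.g. skip duplicate records
};

int main() {
  SkippingIter it;
  BaseIter* base = &it;
  printf("%d\n", base->next());  // prints 2 only because next() is virtual
  return 0;
}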
@@ -212,10 +205,67 @@
   void remove()                  { ShouldNotReachHere(); }
   bool insert(MemPointer* ptr)   { ShouldNotReachHere(); return false; }
   bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
 };
 
+// collapse duplicated records. Eliminating duplicated records here, is much
+// cheaper than during promotion phase. However, it does have limitation - it
+// can only eliminate duplicated records within the generation, there are
+// still chances seeing duplicated records during promotion.
+// We want to use the record with higher sequence number, because it has
+// more accurate callsite pc.
+class VMRecordIterator : public MallocRecordIterator {
+ public:
+  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    while (next != NULL) {
+      assert(cur != NULL, "Sanity check");
+      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
+        "pre-sort order");
+
+      if (is_duplicated_record(cur, next)) {
+        _itr.next();
+        next = (MemPointerRecord*)_itr.peek_next();
+      } else {
+        break;
+      }
+    }
+  }
+
+  virtual MemPointer* current() const {
+    return _itr.current();
+  }
+
+  // get next record, but skip the duplicated records
+  virtual MemPointer* next() {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    while (next != NULL) {
+      assert(cur != NULL, "Sanity check");
+      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
+        "pre-sort order");
+
+      if (is_duplicated_record(cur, next)) {
+        _itr.next();
+        cur = next;
+        next = (MemPointerRecord*)_itr.peek_next();
+      } else {
+        break;
+      }
+    }
+    return cur;
+  }
+
+ private:
+  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
+    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
+    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
+    return ret;
+  }
+};
+
 class StagingArea : public _ValueObj {
  private:
   MemPointerArray*  _malloc_data;
   MemPointerArray*  _vm_data;
 
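
The comment atop the new class states the policy: duplicated records (same address, size, and flags) within one generation are collapsed, keeping the record with the highest sequence number because it carries the more accurate callsite pc. Since the array is pre-sorted with duplicates adjacent in ascending sequence order, skipping forward to the last duplicate implements exactly that. A self-contained sketch of the skip loop (simplified `Rec` type, invented for the example):

#include <cstdio>
#include <vector>

struct Rec { size_t addr, size; int flags, seq; };

static bool duplicated(const Rec& a, const Rec& b) {
  return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
}

int main() {
  // pre-sorted: duplicates adjacent, ascending seq
  std::vector<Rec> recs = {
    {0x1000, 64, 1, 1}, {0x1000, 64, 1, 5},  // duplicates; seq 5 survives
    {0x2000, 32, 1, 3},
  };
  for (size_t i = 0; i < recs.size(); i++) {
    // advance over duplicates so the highest-seq record is the one returned
    while (i + 1 < recs.size() && duplicated(recs[i], recs[i + 1])) {
      i++;
    }
    printf("addr=0x%zx seq=%d\n", recs[i].addr, recs[i].seq);
  }
  return 0;
}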
@@ -231,11 +281,12 @@
 
   MallocRecordIterator malloc_record_walker() {
     return MallocRecordIterator(malloc_data());
   }
 
-  MemPointerArrayIteratorImpl virtual_memory_record_walker();
+  VMRecordIterator virtual_memory_record_walker();
+
   bool init();
   void clear() {
     assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
     _malloc_data->shrink();
     _malloc_data->clear();
@@ -291,16 +342,17 @@
 
   NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
   NOT_PRODUCT(void check_staging_data();)
   NOT_PRODUCT(void check_malloc_pointers();)
   NOT_PRODUCT(bool has_allocation_record(address addr);)
+  // dump all virtual memory pointers in snapshot
+  DEBUG_ONLY( void dump_all_vm_pointers();)
 
  private:
   // copy pointer data from src to dest
   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
 
   bool promote_malloc_records(MemPointerArrayIterator* itr);
   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
 };
 
-
 #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP