comparison src/share/vm/services/memSnapshot.cpp @ 6882:716c64bda5ba

7199092: NMT: NMT needs to deal overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT now can generate virtual memory map.
Reviewed-by: acorn, coleenp
author zgu
date Fri, 19 Oct 2012 21:40:07 -0400
parents 33143ee07800
children 69ad7823b1ca
compared revisions: 6879:8ebcedb7604d vs 6882:716c64bda5ba
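
Before the code comparison itself, a rough, self-contained illustration of the bookkeeping this changeset introduces may help: each reserved virtual memory range now also tracks its committed sub-ranges, a commit adjacent to an existing committed range is consolidated into it, and an uncommit in the middle of a committed range splits it in two, mirroring what add_committed_region() and remove_uncommitted_region() below do. The sketch is a hypothetical simplification written for this page only; the Range and ReservedRegion types are invented for illustration and are not the VMMemRegion/VMMemPointerIterator classes changed in the diff.

// Simplified, hypothetical model of a reserved region with committed sub-ranges.
// Not HotSpot code; illustration only.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Range {
  uintptr_t base;
  size_t    size;
  uintptr_t end() const { return base + size; }
};

struct ReservedRegion {
  Range reserved;
  std::vector<Range> committed;   // kept sorted and non-overlapping

  void commit(uintptr_t addr, size_t size) {
    Range rec{addr, size};
    for (size_t i = 0; i < committed.size(); i++) {
      Range& cur = committed[i];
      if (rec.end() == cur.base) {              // extend a neighbour downward
        cur.base = rec.base; cur.size += rec.size; return;
      }
      if (cur.end() == rec.base) {              // extend a neighbour upward
        cur.size += rec.size;
        if (i + 1 < committed.size() && committed[i + 1].base == cur.end()) {
          cur.size += committed[i + 1].size;    // merge with the following range too
          committed.erase(committed.begin() + i + 1);
        }
        return;
      }
      if (rec.base < cur.base) {                // insert before the first higher range
        committed.insert(committed.begin() + i, rec); return;
      }
    }
    committed.push_back(rec);
  }

  void uncommit(uintptr_t addr, size_t size) {
    for (size_t i = 0; i < committed.size(); i++) {
      Range& cur = committed[i];
      if (addr == cur.base && size == cur.size) {          // remove whole range
        committed.erase(committed.begin() + i); return;
      }
      if (addr == cur.base) { cur.base += size; cur.size -= size; return; }   // trim low end
      if (addr + size == cur.end()) { cur.size -= size; return; }             // trim high end
      if (addr > cur.base && addr + size < cur.end()) {    // split the middle
        Range high{addr + size, cur.end() - (addr + size)};
        cur.size = addr - cur.base;
        committed.insert(committed.begin() + i + 1, high);
        return;
      }
    }
  }
};

int main() {
  ReservedRegion r{{0x1000, 0x8000}, {}};
  r.commit(0x1000, 0x1000);
  r.commit(0x2000, 0x1000);     // consolidated with the previous committed range
  r.uncommit(0x1800, 0x400);    // splits the committed range in the middle
  for (const Range& c : r.committed)
    std::printf("[0x%zx - 0x%zx)\n", (size_t)c.base, (size_t)c.end());
  return 0;
}

A real tracker also has to handle duplicated records, memory type tags, thread stacks, and call-site attribution, which is what most of the new code in the diff below deals with.
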
@@ -29,10 +29,224 @@
 #include "services/memPtr.hpp"
 #include "services/memPtrArray.hpp"
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"
 
+
+bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert(&new_rec);
+}
+
+bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert_after(&new_rec);
+}
+
+// we don't consolidate reserved regions, since they may be categorized
+// in different types.
+bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
+  assert(rec->is_allocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+
+  // we don't have anything yet
+  if (cur == NULL) {
+    return insert_record(rec);
+  }
+
+  assert(cur->is_reserved_region(), "Sanity check");
+  // duplicated records
+  if (cur->is_same_region(rec)) {
+    return true;
+  }
+  assert(cur->base() > rec->addr(), "Just check: locate()");
+  assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+  return insert_record(rec);
+}
+
+// we do consolidate committed regions
+bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
+  assert(rec->is_commit_record(), "Sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+
+  // thread's native stack is always marked as "committed", ignore
+  // the "commit" operation for creating stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // duplicated commit records
+    if (cur->contains_region(rec)) {
+      return true;
+    }
+    if (cur->base() > rec->addr()) {
+      // committed regions can not overlap
+      assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+      if (rec->addr() + rec->size() == cur->base()) {
+        cur->expand_region(rec->addr(), rec->size());
+        return true;
+      } else {
+        return insert_record(rec);
+      }
+    } else if (cur->base() + cur->size() == rec->addr()) {
+      cur->expand_region(rec->addr(), rec->size());
+      VMMemRegion* next_reg = (VMMemRegion*)next();
+      // see if we can consolidate next committed region
+      if (next_reg != NULL && next_reg->is_committed_region() &&
+          next_reg->base() == cur->base() + cur->size()) {
+        cur->expand_region(next_reg->base(), next_reg->size());
+        remove();
+      }
+      return true;
+    }
+    cur = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
+bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
+  assert(rec->is_uncommit_record(), "sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+  // thread's native stack is always marked as "committed", ignore
+  // the "uncommit" operation for removing stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // region already uncommitted, must be due to duplicated record
+    if (cur->addr() >= rec->addr() + rec->size()) {
+      break;
+    } else if (cur->contains_region(rec)) {
+      // uncommit whole region
+      if (cur->is_same_region(rec)) {
+        remove();
+        break;
+      } else if (rec->addr() == cur->addr() ||
+                 rec->addr() + rec->size() == cur->addr() + cur->size()) {
+        // uncommitted from either end of current memory region.
+        cur->exclude_region(rec->addr(), rec->size());
+        break;
+      } else { // split the committed region and release the middle
+        address high_addr = cur->addr() + cur->size();
+        size_t sz = high_addr - rec->addr();
+        cur->exclude_region(rec->addr(), sz);
+        sz = high_addr - (rec->addr() + rec->size());
+        if (MemTracker::track_callsite()) {
+          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+            ((VMMemRegionEx*)cur)->pc());
+          return insert_record_after(&tmp);
+        } else {
+          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+          return insert_record_after(&tmp);
+        }
+      }
+    }
+    cur = (VMMemRegion*)next();
+  }
+
+  // we may not find committed record due to duplicated records
+  return true;
+}
+
+bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
+  assert(rec->is_deallocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+#ifdef ASSERT
+  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
+  // should not have any committed memory in this reserved region
+  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
+#endif
+  if (rec->is_same_region(cur)) {
+    remove();
+  } else if (rec->addr() == cur->addr() ||
+             rec->addr() + rec->size() == cur->addr() + cur->size()) {
+    // released region is at either end of this region
+    cur->exclude_region(rec->addr(), rec->size());
+  } else { // split the reserved region and release the middle
+    address high_addr = cur->addr() + cur->size();
+    size_t sz = high_addr - rec->addr();
+    cur->exclude_region(rec->addr(), sz);
+    sz = high_addr - rec->addr() - rec->size();
+    if (MemTracker::track_callsite()) {
+      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+        ((VMMemRegionEx*)cur)->pc());
+      return insert_reserved_region(&tmp);
+    } else {
+      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+      return insert_reserved_region(&tmp);
+    }
+  }
+  return true;
+}
+
+bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
+  // skip all 'commit' records associated with previous reserved region
+  VMMemRegion* p = (VMMemRegion*)next();
+  while (p != NULL && p->is_committed_region() &&
+         p->base() + p->size() < rec->addr()) {
+    p = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
+bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
+  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
+  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
+  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
+    size_t sz = rgn->size() - new_rgn_size;
+    // the original region becomes 'new' region
+    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
+    // remaining becomes next region
+    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
+    return insert_reserved_region(&next_rgn);
+  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
+    rgn->exclude_region(new_rgn_addr, new_rgn_size);
+    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    return insert_reserved_region(&next_rgn);
+  } else {
+    // the original region will be split into three
+    address rgn_high_addr = rgn->base() + rgn->size();
+    // first region
+    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
+    // the second region is the new region
+    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    if (!insert_reserved_region(&new_rgn)) return false;
+    // the remaining region
+    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
+      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
+    return insert_reserved_region(&rem_rgn);
+  }
+}
+
 static int sort_in_seq_order(const void* p1, const void* p2) {
   assert(p1 != NULL && p2 != NULL, "Sanity check");
   const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
   const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
   return (mp1->seq() - mp2->seq());
@@ -59,15 +273,15 @@
     return false;
   }
 }
 
 
-MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+VMRecordIterator StagingArea::virtual_memory_record_walker() {
   MemPointerArray* arr = vm_data();
   // sort into seq number order
   arr->sort((FN_SORT)sort_in_seq_order);
-  return MemPointerArrayIteratorImpl(arr);
+  return VMRecordIterator(arr);
 }
 
 
 MemSnapshot::MemSnapshot() {
   if (MemTracker::track_callsite()) {
@@ -133,10 +347,12 @@
       // we don't do anything with virtual memory records during merge
       if (!_staging_area.vm_data()->append(p1)) {
        return false;
       }
     } else {
+      // locate matched record and/or also position the iterator to proper
+      // location for this incoming record.
       p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
       // we have not seen this memory block, so just add to staging area
       if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
@@ -197,11 +413,11 @@
   MutexLockerEx lock(_lock, true);
 
   MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
   bool promoted = false;
   if (promote_malloc_records(&malloc_itr)) {
-    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
     if (promote_virtual_memory_records(&vm_itr)) {
       promoted = true;
     }
   }
 
@@ -216,11 +432,11 @@
   MemPointerRecord* matched_rec;
   while (new_rec != NULL) {
     matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
-      // snapshot already contains 'lived' records
+      // snapshot already contains 'live' records
       assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
         "Sanity check");
       // update block states
       if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
@@ -275,91 +491,64 @@
 }
 
 bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
   VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
   MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  VMMemRegionEx new_vm_rec;
-  VMMemRegion* matched_rec;
+  VMMemRegion* reserved_rec;
   while (new_rec != NULL) {
     assert(new_rec->is_vm_pointer(), "Sanity check");
-    if (MemTracker::track_callsite()) {
-      new_vm_rec.init((MemPointerRecordEx*)new_rec);
-    } else {
-      new_vm_rec.init(new_rec);
-    }
-    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
-    if (matched_rec != NULL &&
-        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
-      // snapshot can only have 'live' records
-      assert(matched_rec->is_reserve_record(), "Sanity check");
-      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
-        // resize reserved virtual memory range
-        // resize has to cover committed area
-        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
-        matched_rec->set_reserved_size(new_vm_rec.size());
-      } else if (new_vm_rec.is_commit_record()) {
-        // commit memory inside reserved memory range
-        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
-        // thread stacks are marked committed, so we ignore 'commit' record for creating
-        // stack guard pages
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
-          matched_rec->commit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_uncommit_record()) {
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
-          // ignore 'uncommit' record from removing stack guard pages, uncommit
-          // thread stack as whole
-          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
-            matched_rec->uncommit(new_vm_rec.committed_size());
-          }
-        } else {
-          // uncommit memory inside reserved memory range
-          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
-            "Sanity check");
-          matched_rec->uncommit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_type_tagging_record()) {
-        // tag this virtual memory range to a memory type
-        // can not re-tag a memory range to different type
-        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
-               FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
-          "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      } else if (new_vm_rec.is_release_record()) {
-        // release part or whole memory range
-        if (new_vm_rec.base() == matched_rec->base() &&
-            new_vm_rec.size() == matched_rec->size()) {
-          // release whole virtual memory range
-          assert(matched_rec->committed_size() == 0, "Sanity check");
-          vm_snapshot_itr.remove();
-        } else {
-          // partial release
-          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
-        }
-      } else {
-        // multiple reserve/commit on the same virtual memory range
-        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
-               (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
-          "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      }
-    } else {
-      // no matched record
-      if (new_vm_rec.is_reserve_record()) {
-        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
-          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
-            return false;
-          }
-        } else {
-          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
-            return false;
-          }
-        }
-      } else {
-        // throw out obsolete records, which are the commit/uncommit/release/tag records
-        // on memory regions that are already released.
-      }
-    }
+
+    // locate a reserved region that contains the specified address, or
+    // the nearest reserved region has base address just above the specified
+    // address
+    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
+      // snapshot can only have 'live' records
+      assert(reserved_rec->is_reserved_region(), "Sanity check");
+      if (new_rec->is_allocation_record()) {
+        if (!reserved_rec->is_same_region(new_rec)) {
+          // we only deal with splitting a bigger reserved region into smaller regions.
+          // So far, CDS is the only use case.
+          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
+            return false;
+          }
+        }
+      } else if (new_rec->is_uncommit_record()) {
+        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_commit_record()) {
+        // insert or expand existing committed region to cover this
+        // newly committed region
+        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_deallocation_record()) {
+        // release part or all memory region
+        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_type_tagging_record()) {
+        // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range
+        // to a different type.
+        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
+               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
+          "Sanity check");
+        reserved_rec->tag(new_rec->flags());
+      } else {
+        ShouldNotReachHere();
+      }
+    } else {
+      /*
+       * The assertion failure indicates mis-matched virtual memory records. The likely
+       * scenario is that some virtual memory operations do not go through the os::xxxx_memory()
+       * API and have to be tracked manually (perfMemory is an example).
+       */
+      assert(new_rec->is_allocation_record(), "Sanity check");
+      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
+        return false;
+      }
+    }
     new_rec = (MemPointerRecord*)itr->next();
   }
   return true;
 }
@@ -431,7 +620,35 @@
   while (cur != NULL) {
     assert(cur->is_vm_pointer(), "virtual memory pointer only");
     cur = (MemPointerRecord*)vm_itr.next();
   }
 }
+
+void MemSnapshot::dump_all_vm_pointers() {
+  MemPointerArrayIteratorImpl itr(_vm_ptrs);
+  VMMemRegion* ptr = (VMMemRegion*)itr.current();
+  tty->print_cr("dump virtual memory pointers:");
+  while (ptr != NULL) {
+    if (ptr->is_committed_region()) {
+      tty->print("\t");
+    }
+    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
+      (ptr->addr() + ptr->size()), ptr->flags());
+
+    if (MemTracker::track_callsite()) {
+      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
+      if (ex->pc() != NULL) {
+        char buf[1024];
+        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
+          tty->print_cr("\t%s", buf);
+        } else {
+          tty->print_cr("");
+        }
+      }
+    }
+
+    ptr = (VMMemRegion*)itr.next();
+  }
+  tty->flush();
+}
 #endif // ASSERT
 
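
As a closing illustration of the new split_reserved_region() path above (used, per the comments in the diff, when part of an already reserved range is re-allocated, with CDS as the known use case), here is a hypothetical stand-alone sketch of the three-way split. Region and split_reserved are invented names for this page, not HotSpot code.

// Carving a sub-range out of one reserved range yields at most three ranges:
// the low remainder, the carved-out sub-range, and the high remainder.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Region { uintptr_t base; size_t size; };

std::vector<Region> split_reserved(const Region& rgn, uintptr_t sub_base, size_t sub_size) {
  std::vector<Region> out;
  if (sub_base > rgn.base)                       // low remainder, if any
    out.push_back({rgn.base, sub_base - rgn.base});
  out.push_back({sub_base, sub_size});           // the carved-out sub-range
  uintptr_t sub_end = sub_base + sub_size;
  uintptr_t rgn_end = rgn.base + rgn.size;
  if (sub_end < rgn_end)                         // high remainder, if any
    out.push_back({sub_end, rgn_end - sub_end});
  return out;
}

int main() {
  // Reserve [0x10000, 0x20000), then carve out [0x14000, 0x18000): three regions result.
  for (const Region& r : split_reserved({0x10000, 0x10000}, 0x14000, 0x4000))
    std::printf("[0x%zx - 0x%zx)\n", (size_t)r.base, (size_t)(r.base + r.size));
  return 0;
}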