Mercurial > hg > truffle
annotate src/share/vm/services/memTracker.cpp @ 8883:b9a918201d47
Merge with hsx25
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Sat, 06 Apr 2013 20:04:06 +0200 |
parents | 06db4c0afbf3 |
children | 4c8bb5e4f68f |
rev | line source |
---|---|
6197 | 1 /* |
2 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
22 * | |
23 */ | |
24 #include "precompiled.hpp" | |
25 | |
7464
ecd24264898b
8005048: NMT: #loaded classes needs to just show the # defined classes
zgu
parents:
7172
diff
changeset
|
26 #include "oops/instanceKlass.hpp" |
6197 | 27 #include "runtime/atomic.hpp" |
28 #include "runtime/interfaceSupport.hpp" | |
29 #include "runtime/mutexLocker.hpp" | |
30 #include "runtime/safepoint.hpp" | |
31 #include "runtime/threadCritical.hpp" | |
7971
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
32 #include "runtime/vm_operations.hpp" |
6197 | 33 #include "services/memPtr.hpp" |
34 #include "services/memReporter.hpp" | |
35 #include "services/memTracker.hpp" | |
36 #include "utilities/decoder.hpp" | |
37 #include "utilities/globalDefinitions.hpp" | |
38 | |
39 bool NMT_track_callsite = false; | |
40 | |
41 // walk all 'known' threads at NMT sync point, and collect their recorders | |
42 void SyncThreadRecorderClosure::do_thread(Thread* thread) { | |
43 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); | |
44 if (thread->is_Java_thread()) { | |
45 JavaThread* javaThread = (JavaThread*)thread; | |
46 MemRecorder* recorder = javaThread->get_recorder(); | |
47 if (recorder != NULL) { | |
48 MemTracker::enqueue_pending_recorder(recorder); | |
49 javaThread->set_recorder(NULL); | |
50 } | |
51 } | |
52 _thread_count ++; | |
53 } | |
54 | |
55 | |
56 MemRecorder* MemTracker::_global_recorder = NULL; | |
57 MemSnapshot* MemTracker::_snapshot = NULL; | |
58 MemBaseline MemTracker::_baseline; | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
59 Mutex* MemTracker::_query_lock = NULL; |
6197 | 60 volatile MemRecorder* MemTracker::_merge_pending_queue = NULL; |
61 volatile MemRecorder* MemTracker::_pooled_recorders = NULL; | |
62 MemTrackWorker* MemTracker::_worker_thread = NULL; | |
63 int MemTracker::_sync_point_skip_count = 0; | |
64 MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off; | |
65 volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited; | |
66 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none; | |
67 int MemTracker::_thread_count = 255; | |
68 volatile jint MemTracker::_pooled_recorder_count = 0; | |
7971
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
69 volatile unsigned long MemTracker::_processing_generation = 0; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
70 volatile bool MemTracker::_worker_thread_idle = false; |
8810
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
71 volatile bool MemTracker::_slowdown_calling_thread = false; |
6197 | 72 debug_only(intx MemTracker::_main_thread_tid = 0;) |
6607
e5bf1c79ed5b
7191124: Optimized build is broken due to inconsistent use of DEBUG_ONLY and NOT_PRODUCT macros in NMT
zgu
parents:
6235
diff
changeset
|
73 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;) |
6197 | 74 |
75 void MemTracker::init_tracking_options(const char* option_line) { | |
76 _tracking_level = NMT_off; | |
7172
bbc14465e7db
8003689: MemTracker::init_tracking_options() reads outside array if commandline argument is empty
zgu
parents:
6882
diff
changeset
|
77 if (strcmp(option_line, "=summary") == 0) { |
6197 | 78 _tracking_level = NMT_summary; |
7172
bbc14465e7db
8003689: MemTracker::init_tracking_options() reads outside array if commandline argument is empty
zgu
parents:
6882
diff
changeset
|
79 } else if (strcmp(option_line, "=detail") == 0) { |
6197 | 80 _tracking_level = NMT_detail; |
7172
bbc14465e7db
8003689: MemTracker::init_tracking_options() reads outside array if commandline argument is empty
zgu
parents:
6882
diff
changeset
|
81 } else if (strcmp(option_line, "=off") != 0) { |
bbc14465e7db
8003689: MemTracker::init_tracking_options() reads outside array if commandline argument is empty
zgu
parents:
6882
diff
changeset
|
82 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL); |
6197 | 83 } |
84 } | |
85 | |
86 // first phase of bootstrapping, when VM is still in single-threaded mode. | |
87 void MemTracker::bootstrap_single_thread() { | |
88 if (_tracking_level > NMT_off) { | |
89 assert(_state == NMT_uninited, "wrong state"); | |
90 | |
91 // NMT is not supported with UseMallocOnly is on. NMT can NOT | |
92 // handle the amount of malloc data without significantly impacting | |
93 // runtime performance when this flag is on. | |
94 if (UseMallocOnly) { | |
95 shutdown(NMT_use_malloc_only); | |
96 return; | |
97 } | |
98 | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
99 _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock"); |
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
100 if (_query_lock == NULL) { |
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
101 shutdown(NMT_out_of_memory); |
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
102 return; |
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
103 } |
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
104 |
6197 | 105 debug_only(_main_thread_tid = os::current_thread_id();) |
106 _state = NMT_bootstrapping_single_thread; | |
107 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); | |
108 } | |
109 } | |
110 | |
111 // second phase of bootstrapping, when VM is about to or already entered multi-threaded mode. | |
112 void MemTracker::bootstrap_multi_thread() { | |
113 if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) { | |
114 // create nmt lock for multi-thread execution | |
115 assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); | |
116 _state = NMT_bootstrapping_multi_thread; | |
117 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); | |
118 } | |
119 } | |
120 | |
121 // fully start nmt | |
122 void MemTracker::start() { | |
123 // Native memory tracking is off from command line option | |
124 if (_tracking_level == NMT_off || shutdown_in_progress()) return; | |
125 | |
126 assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); | |
127 assert(_state == NMT_bootstrapping_multi_thread, "wrong state"); | |
128 | |
129 _snapshot = new (std::nothrow)MemSnapshot(); | |
130 if (_snapshot != NULL && !_snapshot->out_of_memory()) { | |
131 if (start_worker()) { | |
132 _state = NMT_started; | |
133 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); | |
134 return; | |
135 } | |
136 } | |
137 | |
138 // fail to start native memory tracking, shut it down | |
139 shutdown(NMT_initialization); | |
140 } | |
141 | |
142 /** | |
143 * Shutting down native memory tracking. | |
144 * We cannot shut down native memory tracking immediately, so we just | |
145 * set a shutdown-pending flag; every native memory tracking component | |
146 * should then shut itself down in an orderly fashion. | |
147 * | |
148 * The shutdown sequences: | |
149 * 1. MemTracker::shutdown() sets MemTracker to shutdown pending state | |
150 * 2. Worker thread calls MemTracker::final_shutdown(), which transitions | |
151 * MemTracker to final shutdown state. | |
152 * 3. At sync point, MemTracker does final cleanup, before sets memory | |
153 * tracking level to off to complete shutdown. | |
154 */ | |
155 void MemTracker::shutdown(ShutdownReason reason) { | |
156 if (_tracking_level == NMT_off) return; | |
157 | |
158 if (_state <= NMT_bootstrapping_single_thread) { | |
159 // we are still in single-thread mode, so there is no contention | |
160 _state = NMT_shutdown_pending; | |
161 _reason = reason; | |
162 } else { | |
163 // we want to know who initialized shutdown | |
164 if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending, | |
165 (jint*)&_state, (jint)NMT_started)) { | |
166 _reason = reason; | |
167 } | |
168 } | |
169 } | |
170 | |
171 // final phase of shutdown | |
172 void MemTracker::final_shutdown() { | |
173 // delete all pending recorders and pooled recorders | |
174 delete_all_pending_recorders(); | |
175 delete_all_pooled_recorders(); | |
176 | |
177 { | |
178 // shared baseline and snapshot are the only objects needed to | |
179 // create query results | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
180 MutexLockerEx locker(_query_lock, true); |
6197 | 181 // cleanup baseline data and snapshot |
182 _baseline.clear(); | |
183 delete _snapshot; | |
184 _snapshot = NULL; | |
185 } | |
186 | |
187 // shutdown shared decoder instance, since it is only | |
188 // used by native memory tracking so far. | |
189 Decoder::shutdown(); | |
190 | |
191 MemTrackWorker* worker = NULL; | |
192 { | |
193 ThreadCritical tc; | |
194 // can not delete worker inside the thread critical | |
195 if (_worker_thread != NULL && Thread::current() == _worker_thread) { | |
196 worker = _worker_thread; | |
197 _worker_thread = NULL; | |
198 } | |
199 } | |
200 if (worker != NULL) { | |
201 delete worker; | |
202 } | |
203 _state = NMT_final_shutdown; | |
204 } | |
205 | |
206 // delete all pooled recorders | |
207 void MemTracker::delete_all_pooled_recorders() { | |
208 // free all pooled recorders | |
209 volatile MemRecorder* cur_head = _pooled_recorders; | |
210 if (cur_head != NULL) { | |
211 MemRecorder* null_ptr = NULL; | |
212 while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, | |
213 (void*)&_pooled_recorders, (void*)cur_head)) { | |
214 cur_head = _pooled_recorders; | |
215 } | |
216 if (cur_head != NULL) { | |
217 delete cur_head; | |
218 _pooled_recorder_count = 0; | |
219 } | |
220 } | |
221 } | |
222 | |
223 // delete all recorders in pending queue | |
224 void MemTracker::delete_all_pending_recorders() { | |
225 // free all pending recorders | |
226 MemRecorder* pending_head = get_pending_recorders(); | |
227 if (pending_head != NULL) { | |
228 delete pending_head; | |
229 } | |
230 } | |
231 | |
232 /* | |
233 * retrieve per-thread recorder of specified thread. | |
234 * if thread == NULL, it means global recorder | |
235 */ | |
236 MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) { | |
237 if (shutdown_in_progress()) return NULL; | |
238 | |
239 MemRecorder* rc; | |
240 if (thread == NULL) { | |
241 rc = _global_recorder; | |
242 } else { | |
243 rc = thread->get_recorder(); | |
244 } | |
245 | |
246 if (rc != NULL && rc->is_full()) { | |
247 enqueue_pending_recorder(rc); | |
248 rc = NULL; | |
249 } | |
250 | |
251 if (rc == NULL) { | |
252 rc = get_new_or_pooled_instance(); | |
253 if (thread == NULL) { | |
254 _global_recorder = rc; | |
255 } else { | |
256 thread->set_recorder(rc); | |
257 } | |
258 } | |
259 return rc; | |
260 } | |
261 | |
262 /* | |
263 * get a per-thread recorder from pool, or create a new one if | |
264 * there is not one available. | |
265 */ | |
266 MemRecorder* MemTracker::get_new_or_pooled_instance() { | |
267 MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders); | |
268 if (cur_head == NULL) { | |
269 MemRecorder* rec = new (std::nothrow)MemRecorder(); | |
270 if (rec == NULL || rec->out_of_memory()) { | |
271 shutdown(NMT_out_of_memory); | |
272 if (rec != NULL) { | |
273 delete rec; | |
274 rec = NULL; | |
275 } | |
276 } | |
277 return rec; | |
278 } else { | |
279 MemRecorder* next_head = cur_head->next(); | |
280 if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders, | |
281 (void*)cur_head)) { | |
282 return get_new_or_pooled_instance(); | |
283 } | |
284 cur_head->set_next(NULL); | |
285 Atomic::dec(&_pooled_recorder_count); | |
7971
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
286 cur_head->set_generation(); |
6197 | 287 return cur_head; |
288 } | |
289 } | |
290 | |
291 /* | |
292 * retrieve all recorders in pending queue, and empty the queue | |
293 */ | |
294 MemRecorder* MemTracker::get_pending_recorders() { | |
295 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue); | |
296 MemRecorder* null_ptr = NULL; | |
297 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue, | |
298 (void*)cur_head)) { | |
299 cur_head = const_cast<MemRecorder*>(_merge_pending_queue); | |
300 } | |
6607
e5bf1c79ed5b
7191124: Optimized build is broken due to inconsistent use of DEBUG_ONLY and NOT_PRODUCT macros in NMT
zgu
parents:
6235
diff
changeset
|
301 NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count)); |
6197 | 302 return cur_head; |
303 } | |
304 | |
305 /* | |
306 * release a recorder to recorder pool. | |
307 */ | |
308 void MemTracker::release_thread_recorder(MemRecorder* rec) { | |
309 assert(rec != NULL, "null recorder"); | |
310 // we don't want to pool too many recorders | |
311 rec->set_next(NULL); | |
312 if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) { | |
313 delete rec; | |
314 return; | |
315 } | |
316 | |
317 rec->clear(); | |
318 MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders); | |
319 rec->set_next(cur_head); | |
320 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders, | |
321 (void*)cur_head)) { | |
322 cur_head = const_cast<MemRecorder*>(_pooled_recorders); | |
323 rec->set_next(cur_head); | |
324 } | |
325 Atomic::inc(&_pooled_recorder_count); | |
326 } | |
327 | |
328 /* | |
329 * This is the most important method in whole nmt implementation. | |
330 * | |
331 * Create a memory record. | |
332 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM | |
333 * still in single thread mode. | |
334 * 2. For all threads other than JavaThread, ThreadCritical is needed | |
335 * to write to recorders to global recorder. | |
336 * 3. For JavaThreads that are no longer visible by safepoint, also | |
337 * need to take ThreadCritical and records are written to global | |
338 * recorders, since these threads are NOT walked by Threads.do_thread(). | |
339 * 4. JavaThreads that are running in native state, have to transition | |
340 * to VM state before writing to per-thread recorders. | |
341 * 5. JavaThreads that are running in VM state do not need any lock and | |
342 * records are written to per-thread recorders. | |
343 * 6. For a thread that has yet to attach a VM 'Thread', it needs to take | |
344 * ThreadCritical to write to global recorder. | |
345 * | |
346 * Important note: | |
347 * NO LOCK should be taken inside ThreadCritical lock !!! | |
348 */ | |
349 void MemTracker::create_memory_record(address addr, MEMFLAGS flags, | |
350 size_t size, address pc, Thread* thread) { | |
6768
716e6ef4482a
7190089: NMT ON: NMT failed assertion on thread's stack base address
zgu
parents:
6607
diff
changeset
|
351 assert(addr != NULL, "Sanity check"); |
6197 | 352 if (!shutdown_in_progress()) { |
353 // single thread, we just write records directly to the global recorder, | |
354 // without any lock | |
355 if (_state == NMT_bootstrapping_single_thread) { | |
356 assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); | |
357 thread = NULL; | |
358 } else { | |
359 if (thread == NULL) { | |
360 // don't use Thread::current(), since it is possible that | |
361 // the calling thread has yet to attach to VM 'Thread', | |
362 // which will result assertion failure | |
363 thread = ThreadLocalStorage::thread(); | |
364 } | |
365 } | |
366 | |
367 if (thread != NULL) { | |
8810
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
368 // slow down all calling threads except NMT worker thread, so it |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
369 // can catch up. |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
370 if (_slowdown_calling_thread && thread != _worker_thread) { |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
371 os::yield_all(); |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
372 } |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
373 |
6197 | 374 if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) { |
6882
716c64bda5ba
7199092: NMT: NMT needs to deal overlapped virtual memory ranges
zgu
parents:
6781
diff
changeset
|
375 JavaThread* java_thread = (JavaThread*)thread; |
6231 | 376 JavaThreadState state = java_thread->thread_state(); |
377 if (SafepointSynchronize::safepoint_safe(java_thread, state)) { | |
378 // JavaThreads that are safepoint safe, can run through safepoint, | |
379 // so ThreadCritical is needed to ensure no threads at safepoint create | |
380 // new records while the records are being gathered and the sequence number is changing | |
381 ThreadCritical tc; | |
382 create_record_in_recorder(addr, flags, size, pc, java_thread); | |
6197 | 383 } else { |
6231 | 384 create_record_in_recorder(addr, flags, size, pc, java_thread); |
6197 | 385 } |
386 } else { | |
387 // other threads, such as worker and watcher threads, etc. need to | |
388 // take ThreadCritical to write to global recorder | |
389 ThreadCritical tc; | |
390 create_record_in_recorder(addr, flags, size, pc, NULL); | |
391 } | |
392 } else { | |
393 if (_state == NMT_bootstrapping_single_thread) { | |
394 // single thread, no lock needed | |
395 create_record_in_recorder(addr, flags, size, pc, NULL); | |
396 } else { | |
397 // for thread has yet to attach VM 'Thread', we can not use VM mutex. | |
398 // use native thread critical instead | |
399 ThreadCritical tc; | |
400 create_record_in_recorder(addr, flags, size, pc, NULL); | |
401 } | |
402 } | |
403 } | |
404 } | |
405 | |
406 // write a record to proper recorder. No lock can be taken from this method | |
407 // down. | |
408 void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags, | |
6231 | 409 size_t size, address pc, JavaThread* thread) { |
6197 | 410 |
6231 | 411 MemRecorder* rc = get_thread_recorder(thread); |
6197 | 412 if (rc != NULL) { |
413 rc->record(addr, flags, size, pc); | |
414 } | |
415 } | |
416 | |
417 /** | |
418 * enqueue a recorder to pending queue | |
419 */ | |
420 void MemTracker::enqueue_pending_recorder(MemRecorder* rec) { | |
421 assert(rec != NULL, "null recorder"); | |
422 | |
423 // we are shutting down, so just delete it | |
424 if (shutdown_in_progress()) { | |
425 rec->set_next(NULL); | |
426 delete rec; | |
427 return; | |
428 } | |
429 | |
430 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue); | |
431 rec->set_next(cur_head); | |
432 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue, | |
433 (void*)cur_head)) { | |
434 cur_head = const_cast<MemRecorder*>(_merge_pending_queue); | |
435 rec->set_next(cur_head); | |
436 } | |
6607
e5bf1c79ed5b
7191124: Optimized build is broken due to inconsistent use of DEBUG_ONLY and NOT_PRODUCT macros in NMT
zgu
parents:
6235
diff
changeset
|
437 NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);) |
6197 | 438 } |
439 | |
440 /* | |
441 * The method is called at global safepoint | |
442 * during it synchronization process. | |
443 * 1. enqueue all JavaThreads' per-thread recorders | |
444 * 2. enqueue global recorder | |
445 * 3. retrieve all pending recorders | |
446 * 4. reset global sequence number generator | |
447 * 5. call worker's sync | |
448 */ | |
449 #define MAX_SAFEPOINTS_TO_SKIP 128 | |
450 #define SAFE_SEQUENCE_THRESHOLD 30 | |
451 #define HIGH_GENERATION_THRESHOLD 60 | |
8810
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
452 #define MAX_RECORDER_THREAD_RATIO 30 |
6197 | 453 |
454 void MemTracker::sync() { | |
455 assert(_tracking_level > NMT_off, "NMT is not enabled"); | |
456 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); | |
457 | |
458 // Some GC tests hit large number of safepoints in short period of time | |
459 // without meaningful activities. We should prevent going to | |
460 // sync point in these cases, which can potentially exhaust generation buffer. | |
461 // Here are the factors to determine if we should go into sync point: | |
462 // 1. not to overflow sequence number | |
463 // 2. if we are in danger to overflow generation buffer | |
464 // 3. how many safepoints we already skipped sync point | |
465 if (_state == NMT_started) { | |
466 // worker thread is not ready, no one can manage generation | |
467 // buffer, so skip this safepoint | |
468 if (_worker_thread == NULL) return; | |
469 | |
470 if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) { | |
471 int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint; | |
472 int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS; | |
473 if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) { | |
474 _sync_point_skip_count ++; | |
475 return; | |
476 } | |
477 } | |
478 _sync_point_skip_count = 0; | |
479 { | |
480 // This method is running at safepoint, with ThreadCritical lock, | |
481 // it should guarantee that NMT is fully sync-ed. | |
482 ThreadCritical tc; | |
6231 | 483 |
6882
716c64bda5ba
7199092: NMT: NMT needs to deal overlapped virtual memory ranges
zgu
parents:
6781
diff
changeset
|
484 SequenceGenerator::reset(); |
716c64bda5ba
7199092: NMT: NMT needs to deal overlapped virtual memory ranges
zgu
parents:
6781
diff
changeset
|
485 |
6231 | 486 // walk all JavaThreads to collect recorders |
487 SyncThreadRecorderClosure stc; | |
488 Threads::threads_do(&stc); | |
489 | |
490 _thread_count = stc.get_thread_count(); | |
491 MemRecorder* pending_recorders = get_pending_recorders(); | |
492 | |
6197 | 493 if (_global_recorder != NULL) { |
494 _global_recorder->set_next(pending_recorders); | |
495 pending_recorders = _global_recorder; | |
496 _global_recorder = NULL; | |
497 } | |
8810
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
498 |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
499 // see if NMT has too many outstanding recorder instances, it usually |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
500 // means that worker thread is lagging behind in processing them. |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
501 if (!AutoShutdownNMT) { |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
502 _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
503 } |
06db4c0afbf3
8009298: NMT: Special version of class loading/unloading with runThese stresses out NMT
zgu
parents:
7971
diff
changeset
|
504 |
6197 | 505 // check _worker_thread with lock to avoid racing condition |
506 if (_worker_thread != NULL) { | |
7464
ecd24264898b
8005048: NMT: #loaded classes needs to just show the # defined classes
zgu
parents:
7172
diff
changeset
|
507 _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); |
6197 | 508 } |
6882
716c64bda5ba
7199092: NMT: NMT needs to deal overlapped virtual memory ranges
zgu
parents:
6781
diff
changeset
|
509 |
716c64bda5ba
7199092: NMT: NMT needs to deal overlapped virtual memory ranges
zgu
parents:
6781
diff
changeset
|
510 assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); |
6197 | 511 } |
512 } | |
513 | |
514 // now, it is the time to shut whole things off | |
515 if (_state == NMT_final_shutdown) { | |
516 // walk all JavaThreads to delete all recorders | |
517 SyncThreadRecorderClosure stc; | |
518 Threads::threads_do(&stc); | |
519 // delete global recorder | |
520 { | |
521 ThreadCritical tc; | |
522 if (_global_recorder != NULL) { | |
523 delete _global_recorder; | |
524 _global_recorder = NULL; | |
525 } | |
526 } | |
6231 | 527 MemRecorder* pending_recorders = get_pending_recorders(); |
528 if (pending_recorders != NULL) { | |
529 delete pending_recorders; | |
530 } | |
531 // try at a later sync point to ensure MemRecorder instance drops to zero to | |
532 // completely shutdown NMT | |
533 if (MemRecorder::_instance_count == 0) { | |
534 _state = NMT_shutdown; | |
535 _tracking_level = NMT_off; | |
536 } | |
6197 | 537 } |
538 } | |
539 | |
540 /* | |
541 * Start worker thread. | |
542 */ | |
543 bool MemTracker::start_worker() { | |
544 assert(_worker_thread == NULL, "Just Check"); | |
545 _worker_thread = new (std::nothrow) MemTrackWorker(); | |
546 if (_worker_thread == NULL || _worker_thread->has_error()) { | |
547 shutdown(NMT_initialization); | |
548 return false; | |
549 } | |
550 _worker_thread->start(); | |
551 return true; | |
552 } | |
553 | |
554 /* | |
555 * We need to collect a JavaThread's per-thread recorder | |
556 * before it exits. | |
557 */ | |
558 void MemTracker::thread_exiting(JavaThread* thread) { | |
559 if (is_on()) { | |
560 MemRecorder* rec = thread->get_recorder(); | |
561 if (rec != NULL) { | |
562 enqueue_pending_recorder(rec); | |
563 thread->set_recorder(NULL); | |
564 } | |
565 } | |
566 } | |
567 | |
568 // baseline current memory snapshot | |
569 bool MemTracker::baseline() { | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
570 MutexLockerEx lock(_query_lock, true); |
6197 | 571 MemSnapshot* snapshot = get_snapshot(); |
572 if (snapshot != NULL) { | |
573 return _baseline.baseline(*snapshot, false); | |
574 } | |
575 return false; | |
576 } | |
577 | |
578 // print memory usage from current snapshot | |
579 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { | |
580 MemBaseline baseline; | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
581 MutexLockerEx lock(_query_lock, true); |
6197 | 582 MemSnapshot* snapshot = get_snapshot(); |
583 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { | |
584 BaselineReporter reporter(out, unit); | |
585 reporter.report_baseline(baseline, summary_only); | |
586 return true; | |
587 } | |
588 return false; | |
589 } | |
590 | |
7971
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
591 // Whitebox API for blocking until the current generation of NMT data has been merged |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
592 bool MemTracker::wbtest_wait_for_data_merge() { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
593 // NMT can't be shutdown while we're holding _query_lock |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
594 MutexLockerEx lock(_query_lock, true); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
595 assert(_worker_thread != NULL, "Invalid query"); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
596 // the generation at query time, so NMT will spin till this generation is processed |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
597 unsigned long generation_at_query_time = SequenceGenerator::current_generation(); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
598 unsigned long current_processing_generation = _processing_generation; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
599 // if generation counter overflown |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
600 bool generation_overflown = (generation_at_query_time < current_processing_generation); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
601 long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
602 // spin |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
603 while (!shutdown_in_progress()) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
604 if (!generation_overflown) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
605 if (current_processing_generation > generation_at_query_time) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
606 return true; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
607 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
608 } else { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
609 assert(generations_to_wrap >= 0, "Sanity check"); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
610 long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
611 assert(current_generations_to_wrap >= 0, "Sanity check"); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
612 // to overflow an unsigned long should take long time, so to_wrap check should be sufficient |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
613 if (current_generations_to_wrap > generations_to_wrap && |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
614 current_processing_generation > generation_at_query_time) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
615 return true; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
616 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
617 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
618 |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
619 // if worker thread is idle, but generation is not advancing, that means |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
620 // there is not safepoint to let NMT advance generation, force one. |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
621 if (_worker_thread_idle) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
622 VM_ForceSafepoint vfs; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
623 VMThread::execute(&vfs); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
624 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
625 MemSnapshot* snapshot = get_snapshot(); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
626 if (snapshot == NULL) { |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
627 return false; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
628 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
629 snapshot->wait(1000); |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
630 current_processing_generation = _processing_generation; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
631 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
632 // We end up here if NMT is shutting down before our data has been merged |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
633 return false; |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
634 } |
4102b59539ce
8005012: Add WB APIs to better support NMT testing
ctornqvi
parents:
7464
diff
changeset
|
635 |
6197 | 636 // compare memory usage between current snapshot and baseline |
637 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { | |
6232
f1f45dddb0bd
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
zgu
parents:
6197
diff
changeset
|
638 MutexLockerEx lock(_query_lock, true); |
6197 | 639 if (_baseline.baselined()) { |
640 MemBaseline baseline; | |
641 MemSnapshot* snapshot = get_snapshot(); | |
642 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { | |
643 BaselineReporter reporter(out, unit); | |
644 reporter.diff_baselines(baseline, _baseline, summary_only); | |
645 return true; | |
646 } | |
647 } | |
648 return false; | |
649 } | |
650 | |
651 #ifndef PRODUCT | |
652 void MemTracker::walk_stack(int toSkip, char* buf, int len) { | |
653 int cur_len = 0; | |
654 char tmp[1024]; | |
655 address pc; | |
656 | |
657 while (cur_len < len) { | |
658 pc = os::get_caller_pc(toSkip + 1); | |
659 if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) { | |
660 jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp); | |
661 cur_len = (int)strlen(buf); | |
662 } else { | |
663 buf[cur_len] = '\0'; | |
664 break; | |
665 } | |
666 toSkip ++; | |
667 } | |
668 } | |
669 | |
670 void MemTracker::print_tracker_stats(outputStream* st) { | |
671 st->print_cr("\nMemory Tracker Stats:"); | |
672 st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num()); | |
673 st->print_cr("\tthead count = %d", _thread_count); | |
674 st->print_cr("\tArena instance = %d", Arena::_instance_count); | |
675 st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count); | |
676 st->print_cr("\tqueued recorder count = %d", _pending_recorder_count); | |
677 st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count); | |
678 if (_worker_thread != NULL) { | |
679 st->print_cr("\tWorker thread:"); | |
680 st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count); | |
681 st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders()); | |
682 st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count); | |
683 } else { | |
684 st->print_cr("\tWorker thread is not started"); | |
685 } | |
686 st->print_cr(" "); | |
687 | |
688 if (_snapshot != NULL) { | |
689 _snapshot->print_snapshot_stats(st); | |
690 } else { | |
691 st->print_cr("No snapshot"); | |
692 } | |
693 } | |
694 #endif | |
695 |