comparison src/share/vm/runtime/safepoint.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 2328d1d3f8cf
1 /*
2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 # include "incls/_precompiled.incl"
26 # include "incls/_safepoint.cpp.incl"
27
28 // --------------------------------------------------------------------------------------------------
29 // Implementation of Safepoint begin/end
30
31 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
32 volatile int SafepointSynchronize::_waiting_to_block = 0;
33 jlong SafepointSynchronize::_last_safepoint = 0;
34 volatile int SafepointSynchronize::_safepoint_counter = 0;
35 static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE
36 static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only
37 static bool timeout_error_printed = false;
38
39 // Roll all threads forward to a safepoint and suspend them all
40 void SafepointSynchronize::begin() {
41
42 Thread* myThread = Thread::current();
43 assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
44
45 _last_safepoint = os::javaTimeNanos();
46
47 #ifndef SERIALGC
48 if (UseConcMarkSweepGC) {
49 // In the future we should investigate whether CMS can use the
50 // more-general mechanism below. DLD (01/05).
51 ConcurrentMarkSweepThread::synchronize(false);
52 } else {
53 ConcurrentGCThread::safepoint_synchronize();
54 }
55 #endif // SERIALGC
56
57 // By getting the Threads_lock, we ensure that no threads are about to start or
58 // exit. It is released again in SafepointSynchronize::end().
59 Threads_lock->lock();
60
61 assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
62
63 int nof_threads = Threads::number_of_threads();
64
65 if (TraceSafepoint) {
66 tty->print_cr("Safepoint synchronization initiated. (%d)", nof_threads);
67 }
68
69 RuntimeService::record_safepoint_begin();
70
71 {
72 MutexLocker mu(Safepoint_lock);
73
74 // Set number of threads to wait for, before we initiate the callbacks
75 _waiting_to_block = nof_threads;
76 TryingToBlock = 0 ;
77 int still_running = nof_threads;
78
79 // Save the starting time, so that it can be compared to see if this has taken
80 // too long to complete.
81 jlong safepoint_limit_time;
82 timeout_error_printed = false;
83
84 // Begin the process of bringing the system to a safepoint.
85 // Java threads can be in several different states and are
86 // stopped by different mechanisms:
87 //
88 // 1. Running interpreted
90 // The interpreter dispatch table is changed to force it to
90 // check for a safepoint condition between bytecodes.
91 // 2. Running in native code
92 // When returning from the native code, a Java thread must check
93 // the safepoint _state to see if we must block. If the
94 // VM thread sees a Java thread in native, it does
95 // not wait for this thread to block. The order of the memory
96 // writes and reads of both the safepoint state and the Java
97 // threads state is critical. In order to guarantee that the
98 // memory writes are serialized with respect to each other,
99 // the VM thread issues a memory barrier instruction
100 // (on MP systems). In order to avoid the overhead of issuing
101 // a memory barrier for each Java thread making native calls, each Java
102 // thread performs a write to a single memory page after changing
103 // the thread state. The VM thread performs a sequence of
104 // mprotect OS calls which forces all previous writes from all
105 // Java threads to be serialized. This is done in the
106 // os::serialize_thread_states() call. This has proven to be
107 // much more efficient than executing a membar instruction
108 // on every call to native code. (An illustrative sketch of this check follows this comment.)
109 // 3. Running compiled Code
110 // Compiled code reads a global (Safepoint Polling) page that
111 // is set to fault if we are trying to get to a safepoint.
112 // 4. Blocked
113 // A thread which is blocked will not be allowed to return from the
114 // block condition until the safepoint operation is complete.
115 // 5. In VM or Transitioning between states
116 // If a Java thread is currently running in the VM or transitioning
117 // between states, the safepointing code will wait for the thread to
118 // block itself when it attempts transitions to a new state.
119 //
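// As an illustrative sketch (not verbatim VM code) of mechanism 2 above, a
// thread returning from native performs roughly the following; the helper
// write_to_serialization_page() is a placeholder name for the per-thread
// serialization-page store described above:
//
//   thread->set_thread_state(_thread_in_native_trans);
//   write_to_serialization_page(thread);     // or a membar when UseMembar is set
//   if (SafepointSynchronize::do_call_back()) {
//     SafepointSynchronize::block(thread);   // rendezvous with the VM thread
//   }
//   thread->set_thread_state(_thread_in_Java);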
120 _state = _synchronizing;
121 OrderAccess::fence();
122
123 // Flush all thread states to memory
124 if (!UseMembar) {
125 os::serialize_thread_states();
126 }
127
128 // Make interpreter safepoint aware
129 Interpreter::notice_safepoints();
130
131 if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
132 // Make polling safepoint aware
133 guarantee (PageArmed == 0, "invariant") ;
134 PageArmed = 1 ;
135 os::make_polling_page_unreadable();
136 }
137
138 // Consider using active_processor_count() ... but that call is expensive.
139 int ncpus = os::processor_count() ;
140
141 #ifdef ASSERT
142 for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
143 assert(cur->safepoint_state()->is_running(), "Illegal initial state");
144 }
145 #endif // ASSERT
146
147 if (SafepointTimeout)
148 safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
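// Note: SafepointTimeoutDelay is expressed in milliseconds while javaTimeNanos()
// returns nanoseconds; MICROUNITS (1,000,000 ns per ms) does the conversion.
// For example, a 10,000 ms delay adds 10,000 * 1,000,000 = 10^10 ns to the limit.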
149
150 // Iterate through all threads until it has been determined how to stop them all at a safepoint
151 unsigned int iterations = 0;
152 int steps = 0 ;
153 while(still_running > 0) {
154 for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
155 assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
156 ThreadSafepointState *cur_state = cur->safepoint_state();
157 if (cur_state->is_running()) {
158 cur_state->examine_state_of_thread();
159 if (!cur_state->is_running()) {
160 still_running--;
161 // consider adjusting steps downward:
162 // steps = 0
163 // steps -= NNN
164 // steps >>= 1
165 // steps = MIN(steps, 2000-100)
166 // if (iterations != 0) steps -= NNN
167 }
168 if (TraceSafepoint && Verbose) cur_state->print();
169 }
170 }
171
172 if ( (PrintSafepointStatistics || (PrintSafepointStatisticsTimeout > 0))
173 && iterations == 0) {
174 begin_statistics(nof_threads, still_running);
175 }
176
177 if (still_running > 0) {
178 // Check if it is taking too long
179 if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
180 print_safepoint_timeout(_spinning_timeout);
181 }
182
183 // Spin to avoid context switching.
184 // There's a tension between allowing the mutators to run (and rendezvous)
185 // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
186 // a mutator might otherwise use profitably to reach a safepoint. Excessive
187 // spinning by the VM thread on a saturated system can increase rendezvous latency.
188 // Blocking or yielding incur their own penalties in the form of context switching
189 // and the resultant loss of cache residency.
190 //
191 // Further complicating matters is that yield() does not work as naively expected
192 // on many platforms -- yield() does not guarantee that any other ready threads
193 // will run. As such we resort to yield_all() after some number of iterations.
194 // Yield_all() is implemented as a short unconditional sleep on some platforms.
195 // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
196 // can actually increase the time it takes the VM thread to detect that a system-wide
197 // stop-the-world safepoint has been reached. In a pathological scenario such as that
198 // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
199 // In that case the mutators will be stalled waiting for the safepoint to complete and the
200 // VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
201 // will eventually wake up and detect that all mutators are safe, at which point
202 // we'll again make progress.
203 //
204 // Beware too that the VMThread typically runs at elevated priority.
205 // Its default priority is higher than the default mutator priority.
206 // Obviously, this complicates spinning.
207 //
208 // Note too that on Windows XP SwitchToThread() has quite different behavior than Sleep(0).
209 // Sleep(0) will _not_ yield to lower priority threads, while SwitchToThread() will.
210 //
211 // See the comments in synchronizer.cpp for additional remarks on spinning.
212 //
213 // In the future we might:
214 // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
215 // This is tricky as the path used by a thread exiting the JVM (say on
216 // a JNI call-out) simply stores into its state field. The burden
217 // is placed on the VM thread, which must poll (spin).
218 // 2. Find something useful to do while spinning. If the safepoint is GC-related
219 // we might aggressively scan the stacks of threads that are already safe.
220 // 3. Use Solaris schedctl to examine the state of the still-running mutators.
221 // If all the mutators are ONPROC there's no reason to sleep or yield.
222 // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
223 // 5. Check system saturation. If the system is not fully saturated then
224 // simply spin and avoid sleep/yield.
225 // 6. As still-running mutators rendezvous they could unpark the sleeping
226 // VMthread. This works well for still-running mutators that become
227 // safe. The VMthread must still poll for mutators that call-out.
228 // 7. Drive the policy on time-since-begin instead of iterations.
229 // 8. Consider making the spin duration a function of the # of CPUs (a worked example follows this comment block):
230 // Spin = (((ncpus-1) * M) + K) + F(still_running)
231 // Alternately, instead of counting iterations of the outer loop
232 // we could count the # of threads visited in the inner loop, above.
233 // 9. On Windows consider using the return value from SwitchToThread()
234 // to drive subsequent spin/SwitchToThread()/Sleep(N) decisions.
235
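// A minimal worked instance of suggestion 8 above -- purely illustrative; the
// constants M and K and the shape of F() below are hypothetical, not tuned values:
//
//   M = 100, K = 1000, F(n) = 10 * n, ncpus = 8, still_running = 3
//   Spin = ((8 - 1) * 100) + 1000 + (10 * 3) = 1730 spin iterations
//
// before falling back to NakedYield()/yield_all() as the code below does.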
236 if (UseCompilerSafepoints && int(iterations) == DeferPollingPageLoopCount) {
237 guarantee (PageArmed == 0, "invariant") ;
238 PageArmed = 1 ;
239 os::make_polling_page_unreadable();
240 }
241
242 // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
243 // ((still_running + _waiting_to_block - TryingToBlock) < ncpus)
244 ++steps ;
245 if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
246 SpinPause() ; // MP-Polite spin
247 } else
248 if (steps < DeferThrSuspendLoopCount) {
249 os::NakedYield() ;
250 } else {
251 os::yield_all(steps) ;
252 // Alternately, the VM thread could transiently depress its scheduling priority or
253 // transiently increase the priority of the tardy mutator(s).
254 }
255
256 iterations ++ ;
257 }
258 assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
259 }
260 assert(still_running == 0, "sanity check");
261
262 if (PrintSafepointStatistics) {
263 update_statistics_on_spin_end();
264 }
265
266 // wait until all threads are stopped
267 while (_waiting_to_block > 0) {
268 if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
269 if (!SafepointTimeout || timeout_error_printed) {
270 Safepoint_lock->wait(true); // true, means with no safepoint checks
271 } else {
272 // Compute remaining time
273 jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
274
275 // If there is no remaining time, then there is an error
276 if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
277 print_safepoint_timeout(_blocking_timeout);
278 }
279 }
280 }
281 assert(_waiting_to_block == 0, "sanity check");
282
283 #ifndef PRODUCT
284 if (SafepointTimeout) {
285 jlong current_time = os::javaTimeNanos();
286 if (safepoint_limit_time < current_time) {
287 tty->print_cr("# SafepointSynchronize: Finished after "
288 INT64_FORMAT_W(6) " ms",
289 ((current_time - safepoint_limit_time) / MICROUNITS +
290 SafepointTimeoutDelay));
291 }
292 }
293 #endif
294
295 assert((_safepoint_counter & 0x1) == 0, "must be even");
296 assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
297 _safepoint_counter ++;
298
299 // Record state
300 _state = _synchronized;
301
302 OrderAccess::fence();
303
304 if (TraceSafepoint) {
305 VM_Operation *op = VMThread::vm_operation();
306 tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
307 }
308
309 RuntimeService::record_safepoint_synchronized();
310 if (PrintSafepointStatistics) {
311 update_statistics_on_sync_end(os::javaTimeNanos());
312 }
313
314 // Call the cleanup tasks that need to run once the safepoint has been reached
315 do_cleanup_tasks();
316 }
317 }
318
319 // Wake up all threads, so they are ready to resume execution after the safepoint
320 // operation has been carried out
321 void SafepointSynchronize::end() {
322
323 assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
324 assert((_safepoint_counter & 0x1) == 1, "must be odd");
325 _safepoint_counter ++;
326 // memory fence isn't required here since an odd _safepoint_counter
327 // value can do no harm and a fence is issued below anyway.
328
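// Because _safepoint_counter is even between safepoints and odd while one is in
// progress, a reader can use it like a seqlock to detect whether a safepoint
// intervened. A hedged, illustrative sketch (not code that exists in this file):
//
//   int before = SafepointSynchronize::_safepoint_counter;
//   ... read state that is only mutated at a safepoint ...
//   int after  = SafepointSynchronize::_safepoint_counter;
//   bool stable = (before == after) && ((before & 0x1) == 0);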
329 DEBUG_ONLY(Thread* myThread = Thread::current();)
330 assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
331
332 if (PrintSafepointStatistics) {
333 end_statistics(os::javaTimeNanos());
334 }
335
336 #ifdef ASSERT
337 // A pending_exception cannot be installed during a safepoint. The threads
338 // may install an async exception into pending_exception after they come back
339 // from a safepoint and unblock, but that should happen later.
340 for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
341 assert (!(cur->has_pending_exception() &&
342 cur->safepoint_state()->is_at_poll_safepoint()),
343 "safepoint installed a pending exception");
344 }
345 #endif // ASSERT
346
347 if (PageArmed) {
348 // Make the polling page readable again (disarm the safepoint poll)
349 os::make_polling_page_readable();
350 PageArmed = 0 ;
351 }
352
353 // Remove safepoint check from interpreter
354 Interpreter::ignore_safepoints();
355
356 {
357 MutexLocker mu(Safepoint_lock);
358
359 assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
360
361 // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
362 // when they get restarted.
363 _state = _not_synchronized;
364 OrderAccess::fence();
365
366 if (TraceSafepoint) {
367 tty->print_cr("Leaving safepoint region");
368 }
369
370 // Start suspended threads
371 for(JavaThread *current = Threads::first(); current; current = current->next()) {
372 // A problem occurring on Solaris when attempting to restart threads is that
373 // the first #cpus - 1 go well, but then the VMThread is preempted when we get
374 // to the next one (since it has been running the longest). We then have
375 // to wait for a cpu to become available before we can continue restarting
376 // threads.
377 // FIXME: This causes the performance of the VM to degrade when active and with
378 // large numbers of threads. Apparently this is due to the synchronous nature
379 // of suspending threads.
380 //
381 // TODO-FIXME: the comments above are vestigial and no longer apply.
382 // Furthermore, using solaris' schedctl in this particular context confers no benefit
383 if (VMThreadHintNoPreempt) {
384 os::hint_no_preempt();
385 }
386 ThreadSafepointState* cur_state = current->safepoint_state();
387 assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
388 cur_state->restart();
389 assert(cur_state->is_running(), "safepoint state has not been reset");
390 }
391
392 RuntimeService::record_safepoint_end();
393
394 // Release the Threads_lock, so threads can be created/destroyed again. It will also start all threads
395 // blocked in signal_thread_blocked
396 Threads_lock->unlock();
397
398 }
399 #ifndef SERIALGC
400 // If there are any concurrent GC threads resume them.
401 if (UseConcMarkSweepGC) {
402 ConcurrentMarkSweepThread::desynchronize(false);
403 } else {
404 ConcurrentGCThread::safepoint_desynchronize();
405 }
406 #endif // SERIALGC
407 }
408
409 bool SafepointSynchronize::is_cleanup_needed() {
410 // Need a safepoint if the inline cache buffer is non-empty
411 if (!InlineCacheBuffer::is_empty()) return true;
412 return false;
413 }
414
415 jlong CounterDecay::_last_timestamp = 0;
416
417 static void do_method(methodOop m) {
418 m->invocation_counter()->decay();
419 }
420
421 void CounterDecay::decay() {
422 _last_timestamp = os::javaTimeMillis();
423
424 // This operation is only performed while the VM is at a safepoint, so no GC
425 // can be in progress and all Java mutators are suspended; hence the
426 // SystemDictionary_lock is not needed either.
427 assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
428 int nclasses = SystemDictionary::number_of_classes();
429 double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
430 CounterHalfLifeTime);
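// Worked example (illustrative numbers): CounterDecayMinIntervalLength is in
// milliseconds and CounterHalfLifeTime in seconds, hence the 1e-3 factor. With
// nclasses = 6000, an interval of 500 ms and a half-life of 30 s:
//   classes_per_tick = 6000 * (500 * 1e-3 / 30) = 100
// i.e. roughly 100 classes have their invocation counters decayed per tick.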
431 for (int i = 0; i < classes_per_tick; i++) {
432 klassOop k = SystemDictionary::try_get_next_class();
433 if (k != NULL && k->klass_part()->oop_is_instance()) {
434 instanceKlass::cast(k)->methods_do(do_method);
435 }
436 }
437 }
438
439 // Various cleanup tasks that should be done periodically at safepoints
440 void SafepointSynchronize::do_cleanup_tasks() {
441 jlong cleanup_time;
442
443 // Update fat-monitor pool, since this is a safepoint.
444 if (TraceSafepoint) {
445 cleanup_time = os::javaTimeNanos();
446 }
447
448 ObjectSynchronizer::deflate_idle_monitors();
449 InlineCacheBuffer::update_inline_caches();
450 if(UseCounterDecay && CounterDecay::is_decay_needed()) {
451 CounterDecay::decay();
452 }
453 NMethodSweeper::sweep();
454
455 if (TraceSafepoint) {
456 tty->print_cr("do_cleanup_tasks takes "INT64_FORMAT_W(6) "ms",
457 (os::javaTimeNanos() - cleanup_time) / MICROUNITS);
458 }
459 }
460
461
462 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
463 switch(state) {
464 case _thread_in_native:
465 // threads in native are safe if they have no Java stack or have a walkable stack
466 return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
467
468 // blocked threads should already have a walkable stack
469 case _thread_blocked:
470 assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
471 return true;
472
473 default:
474 return false;
475 }
476 }
477
478
479 // -------------------------------------------------------------------------------------------------------
480 // Implementation of Safepoint callback point
481
482 void SafepointSynchronize::block(JavaThread *thread) {
483 assert(thread != NULL, "thread must be set");
484 assert(thread->is_Java_thread(), "not a Java thread");
485
486 // Threads shouldn't block if they are in the middle of printing, but...
487 ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());
488
489 // Only bail from the block() call if the thread is gone from the
490 // thread list; starting to exit should still block.
491 if (thread->is_terminated()) {
492 // block current thread if we come here from native code when VM is gone
493 thread->block_if_vm_exited();
494
495 // otherwise do nothing
496 return;
497 }
498
499 JavaThreadState state = thread->thread_state();
500 thread->frame_anchor()->make_walkable(thread);
501
502 // Check that we have a valid thread_state at this point
503 switch(state) {
504 case _thread_in_vm_trans:
505 case _thread_in_Java: // From compiled code
506
507 // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
508 // we pretend we are still in the VM.
509 thread->set_thread_state(_thread_in_vm);
510
511 if (is_synchronizing()) {
512 Atomic::inc (&TryingToBlock) ;
513 }
514
515 // We will always be holding the Safepoint_lock when we are examining the state
516 // of a thread. Hence, the instructions between the Safepoint_lock->lock() and
517 // Safepoint_lock->unlock() happen atomically with respect to the safepoint code
518 Safepoint_lock->lock_without_safepoint_check();
519 if (is_synchronizing()) {
520 // Decrement the number of threads to wait for and signal vm thread
521 assert(_waiting_to_block > 0, "sanity check");
522 _waiting_to_block--;
523 thread->safepoint_state()->set_has_called_back(true);
524
525 // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
526 if (_waiting_to_block == 0) {
527 Safepoint_lock->notify_all();
528 }
529 }
530
531 // We transition the thread to state _thread_blocked here, but
532 // we can't do our usual check for external suspension and then
533 // self-suspend after the lock_without_safepoint_check() call
534 // below because we are often called during transitions while
535 // we hold different locks. That would leave us suspended while
536 // holding a resource which results in deadlocks.
537 thread->set_thread_state(_thread_blocked);
538 Safepoint_lock->unlock();
539
540 // We now try to acquire the threads lock. Since this lock is held by the VM thread during
541 // the entire safepoint, the threads will all line up here during the safepoint.
542 Threads_lock->lock_without_safepoint_check();
543 // restore original state. This is important if the thread comes from compiled code, so it
544 // will continue to execute with the _thread_in_Java state.
545 thread->set_thread_state(state);
546 Threads_lock->unlock();
547 break;
548
549 case _thread_in_native_trans:
550 case _thread_blocked_trans:
551 case _thread_new_trans:
552 if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back) {
553 thread->print_thread_state();
554 fatal("Deadlock in safepoint code. "
555 "Should have called back to the VM before blocking.");
556 }
557
558 // We transition the thread to state _thread_blocked here, but
559 // we can't do our usual check for external suspension and then
560 // self-suspend after the lock_without_safepoint_check() call
561 // below because we are often called during transitions while
562 // we hold different locks. That would leave us suspended while
563 // holding a resource which results in deadlocks.
564 thread->set_thread_state(_thread_blocked);
565
566 // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans; in that
567 // case the safepoint code might still be waiting for it to block. We need to change the state
568 // here, so it can see that it is at a safepoint.
569
570 // Block until the safepoint operation is completed.
571 Threads_lock->lock_without_safepoint_check();
572
573 // Restore state
574 thread->set_thread_state(state);
575
576 Threads_lock->unlock();
577 break;
578
579 default:
580 fatal1("Illegal threadstate encountered: %d", state);
581 }
582
583 // Check for pending async exceptions or suspends - except if the
584 // thread was blocked inside the VM. has_special_runtime_exit_condition()
585 // is called last since it grabs a lock and we only want to do that when
586 // we must.
587 //
588 // Note: we never deliver an async exception at a polling point as the
589 // compiler may not have an exception handler for it. The polling
590 // code will notice the async and deoptimize and the exception will
591 // be delivered. (Polling at a return point is ok though). Sure is
592 // a lot of bother for a deprecated feature...
593 //
594 // We don't deliver an async exception if the thread state is
595 // _thread_in_native_trans so JNI functions won't be called with
596 // a surprising pending exception. If the thread state is going back to Java,
597 // the async exception is checked in check_special_condition_for_native_trans().
598
599 if (state != _thread_blocked_trans &&
600 state != _thread_in_vm_trans &&
601 thread->has_special_runtime_exit_condition()) {
602 thread->handle_special_runtime_exit_condition(
603 !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
604 }
605 }
606
607 // ------------------------------------------------------------------------------------------------------
608 // Exception handlers
609
610 #ifndef PRODUCT
611 #ifdef _LP64
612 #define PTR_PAD ""
613 #else
614 #define PTR_PAD " "
615 #endif
616
617 static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) {
618 bool is_oop = newptr ? ((oop)newptr)->is_oop() : false;
619 tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s",
620 oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
621 newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
622 }
623
624 static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
625 bool is_oop = newptr ? ((oop)(intptr_t)newptr)->is_oop() : false;
626 tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s",
627 oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
628 newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
629 }
630
631 #ifdef SPARC
632 static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) {
633 #ifdef _LP64
634 tty->print_cr("--------+------address-----+------before-----------+-------after----------+");
635 const int incr = 1; // Increment to skip a long, in units of intptr_t
636 #else
637 tty->print_cr("--------+--address-+------before-----------+-------after----------+");
638 const int incr = 2; // Increment to skip a long, in units of intptr_t
639 #endif
640 tty->print_cr("---SP---|");
641 for( int i=0; i<16; i++ ) {
642 tty->print("blob %c%d |"PTR_FORMAT" ","LO"[i>>3],i&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
643 tty->print_cr("--------|");
644 for( int i1=0; i1<frame::memory_parameter_word_sp_offset-16; i1++ ) {
645 tty->print("argv pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
646 tty->print(" pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++);
647 tty->print_cr("--------|");
648 tty->print(" G1 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
649 tty->print(" G3 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
650 tty->print(" G4 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
651 tty->print(" G5 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
652 tty->print_cr(" FSR |"PTR_FORMAT" "PTR64_FORMAT" "PTR64_FORMAT,new_sp,*(jlong*)old_sp,*(jlong*)new_sp);
653 old_sp += incr; new_sp += incr; was_oops += incr;
654 // Skip the floats
655 tty->print_cr("--Float-|"PTR_FORMAT,new_sp);
656 tty->print_cr("---FP---|");
657 old_sp += incr*32; new_sp += incr*32; was_oops += incr*32;
658 for( int i2=0; i2<16; i2++ ) {
659 tty->print("call %c%d |"PTR_FORMAT" ","LI"[i2>>3],i2&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
660 tty->print_cr("");
661 }
662 #endif // SPARC
663 #endif // PRODUCT
664
665
666 void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
667 assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
668 assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
669 assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
670
671 // Uncomment this to get some serious before/after printing of the
672 // Sparc safepoint-blob frame structure.
673 /*
674 intptr_t* sp = thread->last_Java_sp();
675 intptr_t stack_copy[150];
676 for( int i=0; i<150; i++ ) stack_copy[i] = sp[i];
677 bool was_oops[150];
678 for( int i=0; i<150; i++ )
679 was_oops[i] = stack_copy[i] ? ((oop)stack_copy[i])->is_oop() : false;
680 */
681
682 if (ShowSafepointMsgs) {
683 tty->print("handle_polling_page_exception: ");
684 }
685
686 if (PrintSafepointStatistics) {
687 inc_page_trap_count();
688 }
689
690 ThreadSafepointState* state = thread->safepoint_state();
691
692 state->handle_polling_page_exception();
693 // print_me(sp,stack_copy,was_oops);
694 }
695
696
697 void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
698 if (!timeout_error_printed) {
699 timeout_error_printed = true;
700 // Print out info for the threads which didn't reach the safepoint, for debugging
701 // purposes (useful when there are lots of threads in the debugger).
702 tty->print_cr("");
703 tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
704 if (reason == _spinning_timeout) {
705 tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
706 } else if (reason == _blocking_timeout) {
707 tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop.");
708 }
709
710 tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
711 ThreadSafepointState *cur_state;
712 ResourceMark rm;
713 for(JavaThread *cur_thread = Threads::first(); cur_thread;
714 cur_thread = cur_thread->next()) {
715 cur_state = cur_thread->safepoint_state();
716
717 if (cur_thread->thread_state() != _thread_blocked &&
718 ((reason == _spinning_timeout && cur_state->is_running()) ||
719 (reason == _blocking_timeout && !cur_state->has_called_back()))) {
720 tty->print("# ");
721 cur_thread->print();
722 tty->print_cr("");
723 }
724 }
725 tty->print_cr("# SafepointSynchronize::begin: (End of list)");
726 }
727
728 // To debug a long safepoint, specify both DieOnSafepointTimeout &
729 // ShowMessageBoxOnError.
730 if (DieOnSafepointTimeout) {
731 char msg[1024];
732 VM_Operation *op = VMThread::vm_operation();
733 sprintf(msg, "Safepoint sync time longer than %d ms detected when executing %s.",
734 SafepointTimeoutDelay,
735 op != NULL ? op->name() : "no vm operation");
736 fatal(msg);
737 }
738 }
739
740
741 // -------------------------------------------------------------------------------------------------------
742 // Implementation of ThreadSafepointState
743
744 ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
745 _thread = thread;
746 _type = _running;
747 _has_called_back = false;
748 _at_poll_safepoint = false;
749 }
750
751 void ThreadSafepointState::create(JavaThread *thread) {
752 ThreadSafepointState *state = new ThreadSafepointState(thread);
753 thread->set_safepoint_state(state);
754 }
755
756 void ThreadSafepointState::destroy(JavaThread *thread) {
757 if (thread->safepoint_state()) {
758 delete(thread->safepoint_state());
759 thread->set_safepoint_state(NULL);
760 }
761 }
762
763 void ThreadSafepointState::examine_state_of_thread() {
764 assert(is_running(), "better be running or just have hit safepoint poll");
765
766 JavaThreadState state = _thread->thread_state();
767
768 // Check for a thread that is suspended. Note that thread resume tries
769 // to grab the Threads_lock which we own here, so a thread cannot be
770 // resumed during safepoint synchronization.
771
772 // We check with locking because another thread that has not yet
773 // synchronized may be trying to suspend this one.
774 bool is_suspended = _thread->is_any_suspended_with_lock();
775 if (is_suspended) {
776 roll_forward(_at_safepoint);
777 return;
778 }
779
780 // Some JavaThread states have an initial safepoint state of
781 // running, but are actually at a safepoint. We will happily
782 // agree and update the safepoint state here.
783 if (SafepointSynchronize::safepoint_safe(_thread, state)) {
784 roll_forward(_at_safepoint);
785 return;
786 }
787
788 if (state == _thread_in_vm) {
789 roll_forward(_call_back);
790 return;
791 }
792
793 // All other thread states will continue to run until they
794 // transition and self-block in state _blocked
795 // Safepoint polling in compiled code causes the Java threads to do the same.
796 // Note: new threads may require a malloc so they must be allowed to finish
797
798 assert(is_running(), "examine_state_of_thread on non-running thread");
799 return;
800 }
801
802 // Roll the thread forward to the given safepoint suspend type.
803 void ThreadSafepointState::roll_forward(suspend_type type) {
804 _type = type;
805
806 switch(_type) {
807 case _at_safepoint:
808 SafepointSynchronize::signal_thread_at_safepoint();
809 break;
810
811 case _call_back:
812 set_has_called_back(false);
813 break;
814
815 case _running:
816 default:
817 ShouldNotReachHere();
818 }
819 }
820
821 void ThreadSafepointState::restart() {
822 switch(type()) {
823 case _at_safepoint:
824 case _call_back:
825 break;
826
827 case _running:
828 default:
829 tty->print_cr("restart thread "INTPTR_FORMAT" with state %d",
830 _thread, _type);
831 _thread->print();
832 ShouldNotReachHere();
833 }
834 _type = _running;
835 set_has_called_back(false);
836 }
837
838
839 void ThreadSafepointState::print_on(outputStream *st) const {
840 const char *s;
841
842 switch(_type) {
843 case _running : s = "_running"; break;
844 case _at_safepoint : s = "_at_safepoint"; break;
845 case _call_back : s = "_call_back"; break;
846 default:
847 ShouldNotReachHere();
848 }
849
850 st->print_cr("Thread: " INTPTR_FORMAT
851 " [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
852 _thread, _thread->osthread()->thread_id(), s, _has_called_back,
853 _at_poll_safepoint);
854
855 _thread->print_thread_state_on(st);
856 }
857
858
859 // ---------------------------------------------------------------------------------------------------------------------
860
861 // Block the thread at the safepoint poll or poll return.
862 void ThreadSafepointState::handle_polling_page_exception() {
863
864 // Check state. block() will set thread state to thread_in_vm which will
865 // cause the safepoint state _type to become _call_back.
866 assert(type() == ThreadSafepointState::_running,
867 "polling page exception on thread not running state");
868
869 // Step 1: Find the nmethod from the return address
870 if (ShowSafepointMsgs && Verbose) {
871 tty->print_cr("Polling page exception at " INTPTR_FORMAT, thread()->saved_exception_pc());
872 }
873 address real_return_addr = thread()->saved_exception_pc();
874
875 CodeBlob *cb = CodeCache::find_blob(real_return_addr);
876 assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
877 nmethod* nm = (nmethod*)cb;
878
879 // Find frame of caller
880 frame stub_fr = thread()->last_frame();
881 CodeBlob* stub_cb = stub_fr.cb();
882 assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
883 RegisterMap map(thread(), true);
884 frame caller_fr = stub_fr.sender(&map);
885
886 // Should only be poll_return or poll
887 assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" );
888
889 // This is a poll immediately before a return. The exception handling code
890 // has already had the effect of causing the return to occur, so the execution
891 // will continue immediately after the call. In addition, the oopmap at the
892 // return point does not mark the return value as an oop (if it is), so
893 // it needs a handle here to be updated.
894 if( nm->is_at_poll_return(real_return_addr) ) {
895 // See if return type is an oop.
896 bool return_oop = nm->method()->is_returning_oop();
897 Handle return_value;
898 if (return_oop) {
899 // The oop result has been saved on the stack together with all
900 // the other registers. In order to preserve it over GCs we need
901 // to keep it in a handle.
902 oop result = caller_fr.saved_oop_result(&map);
903 assert(result == NULL || result->is_oop(), "must be oop");
904 return_value = Handle(thread(), result);
905 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
906 }
907
908 // Block the thread
909 SafepointSynchronize::block(thread());
910
911 // restore oop result, if any
912 if (return_oop) {
913 caller_fr.set_saved_oop_result(&map, return_value());
914 }
915 }
916
917 // This is a safepoint poll. Verify the return address and block.
918 else {
919 set_at_poll_safepoint(true);
920
921 // verify the blob built the "return address" correctly
922 assert(real_return_addr == caller_fr.pc(), "must match");
923
924 // Block the thread
925 SafepointSynchronize::block(thread());
926 set_at_poll_safepoint(false);
927
928 // If we have a pending async exception deoptimize the frame
929 // as otherwise we may never deliver it.
930 if (thread()->has_async_condition()) {
931 ThreadInVMfromJavaNoAsyncException __tiv(thread());
932 VM_DeoptimizeFrame deopt(thread(), caller_fr.id());
933 VMThread::execute(&deopt);
934 }
935
936 // If an exception has been installed we must check for a pending deoptimization:
937 // an already-deoptimized caller frame combined with a pending exception is fatal (see below).
938
939 if (thread()->has_pending_exception() ) {
940 RegisterMap map(thread(), true);
941 frame caller_fr = stub_fr.sender(&map);
942 if (caller_fr.is_deoptimized_frame()) {
943 // The exception patch will destroy registers that are still
944 // live and will be needed during deoptimization, so this should
945 // not happen: the async exception machinery should have deferred
946 // the exception until the next safepoint, which is detected when
947 // we get back into the interpreter. If we have an exception now,
948 // things are messed up.
949
950 fatal("Exception installed and deoptimization is pending");
951 }
952 }
953 }
954 }
955
956
957 //
958 // Statistics & Instrumentations
959 //
960 SafepointSynchronize::SafepointStats* SafepointSynchronize::_safepoint_stats = NULL;
961 int SafepointSynchronize::_cur_stat_index = 0;
962 julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
963 julong SafepointSynchronize::_coalesced_vmop_count = 0;
964 jlong SafepointSynchronize::_max_sync_time = 0;
965
966 // last_safepoint_start_time records the start time of last safepoint.
967 static jlong last_safepoint_start_time = 0;
968 static jlong sync_end_time = 0;
969 static bool need_to_track_page_armed_status = false;
970 static bool init_done = false;
971
972 void SafepointSynchronize::deferred_initialize_stat() {
973 if (init_done) return;
974
975 if (PrintSafepointStatisticsCount <= 0) {
976 fatal("Wrong PrintSafepointStatisticsCount");
977 }
978
979 // If PrintSafepointStatisticsTimeout is specified, the statistics data will
980 // be printed right away, in which case _safepoint_stats degenerates to
981 // a single-element array. Otherwise, it is a ring buffer with a default
982 // size of PrintSafepointStatisticsCount.
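// For example (flag values here are illustrative, not defaults):
//   -XX:PrintSafepointStatisticsCount=300 keeps a 300-entry ring buffer that is
//   dumped each time it fills up, while -XX:PrintSafepointStatisticsTimeout=30
//   shrinks the buffer to a single entry and prints any safepoint whose sync
//   phase takes longer than 30 ms as soon as that safepoint ends.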
983 int stats_array_size;
984 if (PrintSafepointStatisticsTimeout > 0) {
985 stats_array_size = 1;
986 PrintSafepointStatistics = true;
987 } else {
988 stats_array_size = PrintSafepointStatisticsCount;
989 }
990 _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
991 * sizeof(SafepointStats));
992 guarantee(_safepoint_stats != NULL,
993 "not enough memory for safepoint instrumentation data");
994
995 if (UseCompilerSafepoints && DeferPollingPageLoopCount >= 0) {
996 need_to_track_page_armed_status = true;
997 }
998
999 tty->print(" vmop_name "
1000 "[threads: total initially_running wait_to_block] ");
1001 tty->print("[time: spin block sync] "
1002 "[vmop_time time_elapsed] ");
1003
1004 // No page-armed status is printed out if the page is always armed.
1005 if (need_to_track_page_armed_status) {
1006 tty->print("page_armed ");
1007 }
1008
1009 tty->print_cr("page_trap_count");
1010
1011 init_done = true;
1012 }
1013
1014 void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
1015 deferred_initialize_stat();
1016
1017 SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1018
1019 VM_Operation *op = VMThread::vm_operation();
1020 spstat->_vmop_type = (op != NULL ? op->type() : -1);
1021 if (op != NULL) {
1022 _safepoint_reasons[spstat->_vmop_type]++;
1023 }
1024
1025 spstat->_nof_total_threads = nof_threads;
1026 spstat->_nof_initial_running_threads = nof_running;
1027 spstat->_nof_threads_hit_page_trap = 0;
1028
1029 // Records the start time of spinning. The real time spent spinning
1030 // will be adjusted when the spin is done. The same trick is applied to the
1031 // time spent waiting for threads to block.
1032 if (nof_running != 0) {
1033 spstat->_time_to_spin = os::javaTimeNanos();
1034 } else {
1035 spstat->_time_to_spin = 0;
1036 }
1037
1038 if (last_safepoint_start_time == 0) {
1039 spstat->_time_elapsed_since_last_safepoint = 0;
1040 } else {
1041 spstat->_time_elapsed_since_last_safepoint = _last_safepoint -
1042 last_safepoint_start_time;
1043 }
1044 last_safepoint_start_time = _last_safepoint;
1045 }
1046
1047 void SafepointSynchronize::update_statistics_on_spin_end() {
1048 SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1049
1050 jlong cur_time = os::javaTimeNanos();
1051
1052 spstat->_nof_threads_wait_to_block = _waiting_to_block;
1053 if (spstat->_nof_initial_running_threads != 0) {
1054 spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
1055 }
1056
1057 if (need_to_track_page_armed_status) {
1058 spstat->_page_armed = (PageArmed == 1);
1059 }
1060
1061 // Records the start time of waiting for threads to block. Updated when blocking is done.
1062 if (_waiting_to_block != 0) {
1063 spstat->_time_to_wait_to_block = cur_time;
1064 } else {
1065 spstat->_time_to_wait_to_block = 0;
1066 }
1067 }
1068
1069 void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) {
1070 SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1071
1072 if (spstat->_nof_threads_wait_to_block != 0) {
1073 spstat->_time_to_wait_to_block = end_time -
1074 spstat->_time_to_wait_to_block;
1075 }
1076
1077 // Records the end time of the sync, which will be used to calculate the total
1078 // vm operation time when end_statistics is called; the sync time itself is
1079 // measured from the start of the safepoint (_last_safepoint).
1080 spstat->_time_to_sync = end_time - _last_safepoint;
1081 if (spstat->_time_to_sync > _max_sync_time) {
1082 _max_sync_time = spstat->_time_to_sync;
1083 }
1084 sync_end_time = end_time;
1085 }
1086
1087 void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
1088 SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1089
1090 // Update the vm operation time.
1091 spstat->_time_to_exec_vmop = vmop_end_time - sync_end_time;
1092 // Only safepoints whose sync time is longer than the specified
1093 // PrintSafepointStatisticsTimeout are printed out right away.
1094 // By default it is -1, meaning all samples are put into the list.
1095 if ( PrintSafepointStatisticsTimeout > 0) {
1096 if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
1097 print_statistics();
1098 }
1099 } else {
1100 // The safepoint statistics will be printed out when the _safepoint_stats
1101 // array fills up.
1102 if (_cur_stat_index != PrintSafepointStatisticsCount - 1) {
1103 _cur_stat_index ++;
1104 } else {
1105 print_statistics();
1106 _cur_stat_index = 0;
1107 tty->print_cr("");
1108 }
1109 }
1110 }
1111
1112 void SafepointSynchronize::print_statistics() {
1113 int index;
1114 SafepointStats* sstats = _safepoint_stats;
1115
1116 for (index = 0; index <= _cur_stat_index; index++) {
1117 sstats = &_safepoint_stats[index];
1118 tty->print("%-28s ["
1119 INT32_FORMAT_W(8)INT32_FORMAT_W(11)INT32_FORMAT_W(15)
1120 "] ",
1121 sstats->_vmop_type == -1 ? "no vm operation" :
1122 VM_Operation::name(sstats->_vmop_type),
1123 sstats->_nof_total_threads,
1124 sstats->_nof_initial_running_threads,
1125 sstats->_nof_threads_wait_to_block);
1126 // "/ MICROUNITS " is to convert the unit from nanos to millis.
1127 tty->print(" ["
1128 INT64_FORMAT_W(6)INT64_FORMAT_W(6)INT64_FORMAT_W(6)
1129 "] "
1130 "["INT64_FORMAT_W(6)INT64_FORMAT_W(9) "] ",
1131 sstats->_time_to_spin / MICROUNITS,
1132 sstats->_time_to_wait_to_block / MICROUNITS,
1133 sstats->_time_to_sync / MICROUNITS,
1134 sstats->_time_to_exec_vmop / MICROUNITS,
1135 sstats->_time_elapsed_since_last_safepoint / MICROUNITS);
1136
1137 if (need_to_track_page_armed_status) {
1138 tty->print(INT32_FORMAT" ", sstats->_page_armed);
1139 }
1140 tty->print_cr(INT32_FORMAT" ", sstats->_nof_threads_hit_page_trap);
1141 }
1142 }
1143
1144 // This method will be called when the VM exits. It first calls
1145 // print_statistics to print out the rest of the samples, and then
1146 // it summarizes the sampling.
1147 void SafepointSynchronize::print_stat_on_exit() {
1148 if (_safepoint_stats == NULL) return;
1149
1150 SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
1151
1152 // During VM exit, end_statistics may not get called and in that
1153 // case, if the sync time is less than PrintSafepointStatisticsTimeout,
1154 // don't print it out.
1155 // Approximate the vm op time.
1156 _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
1157 os::javaTimeNanos() - sync_end_time;
1158
1159 if ( PrintSafepointStatisticsTimeout < 0 ||
1160 spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
1161 print_statistics();
1162 }
1163 tty->print_cr("");
1164
1165 // Print out polling page sampling status.
1166 if (!need_to_track_page_armed_status) {
1167 if (UseCompilerSafepoints) {
1168 tty->print_cr("Polling page always armed");
1169 }
1170 } else {
1171 tty->print_cr("Defer polling page loop count = %d\n",
1172 DeferPollingPageLoopCount);
1173 }
1174
1175 for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
1176 if (_safepoint_reasons[index] != 0) {
1177 tty->print_cr("%-26s"UINT64_FORMAT_W(10), VM_Operation::name(index),
1178 _safepoint_reasons[index]);
1179 }
1180 }
1181
1182 tty->print_cr(UINT64_FORMAT_W(5)" VM operations coalesced during safepoint",
1183 _coalesced_vmop_count);
1184 tty->print_cr("Maximum sync time "INT64_FORMAT_W(5)" ms",
1185 _max_sync_time / MICROUNITS);
1186 }
1187
1188 // ------------------------------------------------------------------------------------------------
1189 // Non-product code
1190
1191 #ifndef PRODUCT
1192
1193 void SafepointSynchronize::print_state() {
1194 if (_state == _not_synchronized) {
1195 tty->print_cr("not synchronized");
1196 } else if (_state == _synchronizing || _state == _synchronized) {
1197 tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
1198 "synchronized");
1199
1200 for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
1201 cur->safepoint_state()->print();
1202 }
1203 }
1204 }
1205
1206 void SafepointSynchronize::safepoint_msg(const char* format, ...) {
1207 if (ShowSafepointMsgs) {
1208 va_list ap;
1209 va_start(ap, format);
1210 tty->vprint_cr(format, ap);
1211 va_end(ap);
1212 }
1213 }
1214
1215 #endif // !PRODUCT