comparison: src/share/vm/runtime/synchronizer.cpp @ 1930:2d26b0046e0d

Merge.

author    Thomas Wuerthinger <wuerthinger@ssw.jku.at>
date      Tue, 30 Nov 2010 14:53:30 +0100
parents   b30a2cd5e3a2 fa83ab460c54
children  06f017f7daa7

comparing 1484:6b7001391c97 with 1930:2d26b0046e0d
1 1 /*
2 * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
2 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_synchronizer.cpp.incl"
30 30 #define ATTR __attribute__((noinline))
31 31 #else
32 32 #define ATTR
33 33 #endif
34 34
35 // Native markword accessors for synchronization and hashCode().
36 //
37 35 // The "core" versions of monitor enter and exit reside in this file.
38 36 // The interpreter and compilers contain specialized transliterated
39 37 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
40 38 // for instance. If you make changes here, make sure to modify the
41 39 // interpreter, and both C1 and C2 fast-path inline locking code emission.
42 40 //
43 // TODO: merge the objectMonitor and synchronizer classes.
44 41 //
45 42 // -----------------------------------------------------------------------------
46 43
47 44 #ifdef DTRACE_ENABLED
48 45
50 47 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
51 48
52 49 HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
53 50 jlong, uintptr_t, char*, int, long);
54 51 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
55 jlong, uintptr_t, char*, int);
56 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
57 jlong, uintptr_t, char*, int);
58 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
59 jlong, uintptr_t, char*, int);
60 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
61 jlong, uintptr_t, char*, int);
62 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
63 jlong, uintptr_t, char*, int);
64 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
65 52 jlong, uintptr_t, char*, int);
66 53
67 54 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \
68 55 char* bytes = NULL; \
69 56 int len = 0; \
97 84 #define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon) {;}
98 85 #define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon) {;}
99 86
100 87 #endif // ndef DTRACE_ENABLED
101 88
102 // ObjectWaiter serves as a "proxy" or surrogate thread.
103 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
104 // ParkEvent instead. Beware, however, that the JVMTI code
105 // knows about ObjectWaiters, so we'll have to reconcile that code.
106 // See next_waiter(), first_waiter(), etc.
107
108 class ObjectWaiter : public StackObj {
109 public:
110 enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
111 enum Sorted { PREPEND, APPEND, SORTED } ;
112 ObjectWaiter * volatile _next;
113 ObjectWaiter * volatile _prev;
114 Thread* _thread;
115 ParkEvent * _event;
116 volatile int _notified ;
117 volatile TStates TState ;
118 Sorted _Sorted ; // List placement disposition
119 bool _active ; // Contention monitoring is enabled
120 public:
121 ObjectWaiter(Thread* thread) {
122 _next = NULL;
123 _prev = NULL;
124 _notified = 0;
125 TState = TS_RUN ;
126 _thread = thread;
127 _event = thread->_ParkEvent ;
128 _active = false;
129 assert (_event != NULL, "invariant") ;
130 }
131
132 void wait_reenter_begin(ObjectMonitor *mon) {
133 JavaThread *jt = (JavaThread *)this->_thread;
134 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
135 }
136
137 void wait_reenter_end(ObjectMonitor *mon) {
138 JavaThread *jt = (JavaThread *)this->_thread;
139 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
140 }
141 };
142
143 enum ManifestConstants {
144 ClearResponsibleAtSTW = 0,
145 MaximumRecheckInterval = 1000
146 } ;
147
148
149 #undef TEVENT
150 #define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
151
152 #define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
153
154 #undef TEVENT
155 #define TEVENT(nom) {;}
156
89 // This exists only as a workaround of dtrace bug 6254741
90 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
91 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
92 return 0;
93 }
94
95 #define NINFLATIONLOCKS 256
96 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
97
98 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
99 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
100 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
101 int ObjectSynchronizer::gOmInUseCount = 0;
102 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
103 static volatile int MonitorFreeCount = 0 ; // # on gFreeList
104 static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
105 #define CHAINMARKER ((oop)-1)
106
107 // -----------------------------------------------------------------------------
108 // Fast Monitor Enter/Exit
109 // This is the fast monitor enter. The interpreter and compiler use
110 // assembly copies of this code. Make sure to update that code
111 // if the following function is changed. The implementation is
112 // extremely sensitive to race conditions. Be careful.
113
114 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
115 if (UseBiasedLocking) {
116 if (!SafepointSynchronize::is_at_safepoint()) {
117 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
118 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
119 return;
120 }
121 } else {
122 assert(!attempt_rebias, "can not rebias toward VM thread");
123 BiasedLocking::revoke_at_safepoint(obj);
124 }
125 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
126 }
127
128 slow_enter (obj, lock, THREAD) ;
129 }
130
131 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
132 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
133 // if the displaced header is null, the previous enter was a recursive enter; no-op
134 markOop dhw = lock->displaced_header();
135 markOop mark ;
136 if (dhw == NULL) {
137 // Recursive stack-lock.
138 // Diagnostics -- Could be: stack-locked, inflating, inflated.
139 mark = object->mark() ;
140 assert (!mark->is_neutral(), "invariant") ;
141 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
142 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
143 }
144 if (mark->has_monitor()) {
145 ObjectMonitor * m = mark->monitor() ;
146 assert(((oop)(m->object()))->mark() == mark, "invariant") ;
147 assert(m->is_entered(THREAD), "invariant") ;
148 }
149 return ;
150 }
151
152 mark = object->mark() ;
153
154 // If the object is stack-locked by the current thread, try to
155 // swing the displaced header from the box back to the mark.
156 if (mark == (markOop) lock) {
157 assert (dhw->is_neutral(), "invariant") ;
158 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
159 TEVENT (fast_exit: release stacklock) ;
160 return;
161 }
162 }
163
164 ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
165 }
166
167 // -----------------------------------------------------------------------------
168 // Interpreter/Compiler Slow Case
169 // This routine is used to handle the interpreter/compiler slow case.
170 // We don't need to use the fast path here, because it must have
171 // already failed in the interpreter/compiler code.
172 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
173 markOop mark = obj->mark();
174 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
175
176 if (mark->is_neutral()) {
177 // Anticipate successful CAS -- the ST of the displaced mark must
178 // be visible <= the ST performed by the CAS.
179 lock->set_displaced_header(mark);
180 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
181 TEVENT (slow_enter: release stacklock) ;
182 return ;
183 }
184 // Fall through to inflate() ...
185 } else
186 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
187 assert(lock != mark->locker(), "must not re-lock the same lock");
188 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
189 lock->set_displaced_header(NULL);
190 return;
191 }
192
193 #if 0
194 // The following optimization isn't particularly useful.
195 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
196 lock->set_displaced_header (NULL) ;
197 return ;
198 }
199 #endif
200
201 // The object header will never be displaced to this lock,
202 // so it does not matter what the value is, except that it
203 // must be non-zero to avoid looking like a re-entrant lock,
204 // and must not look locked either.
205 lock->set_displaced_header(markOopDesc::unused_mark());
206 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
207 }
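
For readers who want to experiment with the displaced-header ("thin lock") protocol that slow_enter() and fast_exit() implement, here is a minimal standalone C++ sketch. The Obj and BoxLock types, the 0x1 "neutral" bit pattern, and the fallback comments are illustrative assumptions, not HotSpot code.

    // Minimal sketch of the displaced-header stack-lock protocol (not HotSpot code).
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct Obj     { std::atomic<uintptr_t> mark; };  // stand-in for the object's header word
    struct BoxLock { uintptr_t displaced; };          // stand-in for a BasicLock on the stack

    // Try to take the thin lock; a real runtime would otherwise inflate.
    bool thin_enter(Obj* o, BoxLock* box) {
      uintptr_t mark = o->mark.load(std::memory_order_relaxed);
      if ((mark & 0x3) == 0x1) {                      // illustrative "neutral" (unlocked) pattern
        box->displaced = mark;                        // publish the displaced header before the CAS
        return o->mark.compare_exchange_strong(
            mark, reinterpret_cast<uintptr_t>(box), std::memory_order_acq_rel);
      }
      return false;                                   // already locked -> slow path in a real runtime
    }

    void thin_exit(Obj* o, BoxLock* box) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      // Swing the displaced header from the box back into the object's mark.
      if (!o->mark.compare_exchange_strong(expected, box->displaced,
                                           std::memory_order_acq_rel)) {
        // The mark no longer points at our box (e.g. the lock was inflated);
        // a real runtime exits through the heavyweight monitor instead.
      }
    }

    int main() {
      Obj o; o.mark = 0x1;                            // start "neutral"
      BoxLock box;
      std::printf("entered=%d\n", (int) thin_enter(&o, &box));
      thin_exit(&o, &box);
      std::printf("mark=0x%lx\n", (unsigned long) o.mark.load());
      return 0;
    }
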
208
209 // This routine is used to handle the interpreter/compiler slow case.
210 // We don't need to use the fast path here, because it must have
211 // already failed in the interpreter/compiler code. Simply using the
212 // heavyweight monitor should be OK, unless someone finds otherwise.
213 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
214 fast_exit (object, lock, THREAD) ;
215 }
216
217 // -----------------------------------------------------------------------------
218 // Class Loader support to workaround deadlocks on the class loader lock objects
219 // Also used by GC
220 // complete_exit()/reenter() are used to wait on a nested lock
221 // i.e. to give up an outer lock completely and then re-enter
222 // Used when holding nested locks - lock acquisition order: lock1 then lock2
223 // 1) complete_exit lock1 - saving recursion count
224 // 2) wait on lock2
225 // 3) when notified on lock2, unlock lock2
226 // 4) reenter lock1 with original recursion count
227 // 5) lock lock2
228 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
229 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
230 TEVENT (complete_exit) ;
231 if (UseBiasedLocking) {
232 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
233 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
234 }
235
236 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
237
238 return monitor->complete_exit(THREAD);
239 }
240
241 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
242 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
243 TEVENT (reenter) ;
244 if (UseBiasedLocking) {
245 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
246 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
247 }
248
249 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
250
251 monitor->reenter(recursion, THREAD);
252 }
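
The five-step protocol in the comment above can be exercised with an ordinary recursive monitor. Below is a standalone sketch (ToyMonitor is an invented class, not the VM's ObjectMonitor) of what complete_exit()/reenter() amount to: fully release a lock regardless of its recursion depth, then later restore that depth.

    // Sketch of the complete_exit()/reenter() pattern with an invented ToyMonitor.
    #include <atomic>
    #include <mutex>
    #include <thread>

    class ToyMonitor {
      std::mutex                   _mu;
      std::atomic<std::thread::id> _owner{ std::thread::id() };
      int                          _recursions = 0;    // only touched by the owner
     public:
      void enter() {
        if (_owner.load() == std::this_thread::get_id()) { ++_recursions; return; }
        _mu.lock();
        _owner.store(std::this_thread::get_id());
      }
      void exit() {
        if (_recursions > 0) { --_recursions; return; }
        _owner.store(std::thread::id());
        _mu.unlock();
      }
      int complete_exit() {                            // give the lock up entirely, however deep
        int saved = _recursions;
        _recursions = 0;
        _owner.store(std::thread::id());
        _mu.unlock();
        return saved;                                  // remember the recursion count
      }
      void reenter(int saved) {                        // re-acquire and restore the saved depth
        _mu.lock();
        _owner.store(std::this_thread::get_id());
        _recursions = saved;
      }
    };
    // Usage mirrors the numbered protocol: saved = lock1.complete_exit();
    // wait on lock2; lock1.reenter(saved); then take lock2 again.
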
253 // -----------------------------------------------------------------------------
254 // JNI locks on java objects
255 // NOTE: must use heavy weight monitor to handle jni monitor enter
256 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
257 // the current locking is from JNI instead of Java code
258 TEVENT (jni_enter) ;
259 if (UseBiasedLocking) {
260 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
261 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
262 }
263 THREAD->set_current_pending_monitor_is_from_java(false);
264 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
265 THREAD->set_current_pending_monitor_is_from_java(true);
266 }
267
268 // NOTE: must use heavy weight monitor to handle jni monitor enter
269 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
270 if (UseBiasedLocking) {
271 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
272 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
273 }
274
275 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
276 return monitor->try_enter(THREAD);
277 }
278
279
280 // NOTE: must use heavy weight monitor to handle jni monitor exit
281 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
282 TEVENT (jni_exit) ;
283 if (UseBiasedLocking) {
284 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
285 }
286 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
287
288 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
289 // If this thread has locked the object, exit the monitor. Note: can't use
290 // monitor->check(CHECK); must exit even if an exception is pending.
291 if (monitor->check(THREAD)) {
292 monitor->exit(THREAD);
293 }
294 }
295
296 // -----------------------------------------------------------------------------
297 // Internal VM locks on java objects
298 // standard constructor, allows locking failures
299 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
300 _dolock = doLock;
301 _thread = thread;
302 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
303 _obj = obj;
304
305 if (_dolock) {
306 TEVENT (ObjectLocker) ;
307
308 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
309 }
310 }
311
312 ObjectLocker::~ObjectLocker() {
313 if (_dolock) {
314 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
315 }
316 }
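
ObjectLocker is a stack-allocated guard: the constructor enters, the destructor exits, so the lock is released on every path out of the scope. The same scoped-guard shape in portable C++, shown only as an analogy (g_mu is a hypothetical mutex, not a VM lock):

    #include <mutex>

    std::mutex g_mu;                                 // hypothetical lock, not a VM object

    void do_work_locked() {
      std::lock_guard<std::mutex> guard(g_mu);       // acquired here ...
      // ... touch the protected state ...
    }                                                // ... released automatically here
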
317
318
319 // -----------------------------------------------------------------------------
320 // Wait/Notify/NotifyAll
321 // NOTE: must use heavy weight monitor to handle wait()
322 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
323 if (UseBiasedLocking) {
324 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
325 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
326 }
327 if (millis < 0) {
328 TEVENT (wait - throw IAX) ;
329 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
330 }
331 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
332 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
333 monitor->wait(millis, true, THREAD);
334
335 /* This dummy call is in place to get around dtrace bug 6254741. Once
336 that's fixed we can uncomment the following line and remove the call */
337 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
338 dtrace_waited_probe(monitor, obj, THREAD);
339 }
340
341 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
342 if (UseBiasedLocking) {
343 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
344 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
345 }
346 if (millis < 0) {
347 TEVENT (wait - throw IAX) ;
348 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
349 }
350 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
351 }
352
353 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
354 if (UseBiasedLocking) {
355 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
356 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
357 }
358
359 markOop mark = obj->mark();
360 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
361 return;
362 }
363 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
364 }
365
366 // NOTE: see comment of notify()
367 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
368 if (UseBiasedLocking) {
369 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
370 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
371 }
372
373 markOop mark = obj->mark();
374 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
375 return;
376 }
377 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
378 }
379
380 // -----------------------------------------------------------------------------
381 // Hash Code handling
382 //
157 383 // Performance concern:
158 384 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
159 385 // OrderAccess::Dummy variable. This store is unnecessary for correctness.
160 386 // Many threads STing into a common location causes considerable cache migration
161 387 // or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore()
183 409 volatile int hcSequence ;
184 410 double padFinal [8] ;
185 411 } ;
186 412
187 413 static SharedGlobals GVars ;
414 static int MonitorScavengeThreshold = 1000000 ;
415 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
190 // Tunables ...
191 // The knob* variables are effectively final. Once set they should
192 // never be modified hence. Consider using __read_mostly with GCC.
193
194 static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins
195 static int Knob_HandOff = 0 ;
196 static int Knob_Verbose = 0 ;
197 static int Knob_ReportSettings = 0 ;
198
199 static int Knob_SpinLimit = 5000 ; // derived by an external tool -
200 static int Knob_SpinBase = 0 ; // Floor AKA SpinMin
201 static int Knob_SpinBackOff = 0 ; // spin-loop backoff
202 static int Knob_CASPenalty = -1 ; // Penalty for failed CAS
203 static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change
204 static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field
205 static int Knob_SpinEarly = 1 ;
206 static int Knob_SuccEnabled = 1 ; // futile wake throttling
207 static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one
208 static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs
209 static int Knob_Bonus = 100 ; // spin success bonus
210 static int Knob_BonusB = 100 ; // spin success bonus
211 static int Knob_Penalty = 200 ; // spin failure penalty
212 static int Knob_Poverty = 1000 ;
213 static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park()
214 static int Knob_FixedSpin = 0 ;
215 static int Knob_OState = 3 ; // Spinner checks thread state of _owner
216 static int Knob_UsePause = 1 ;
217 static int Knob_ExitPolicy = 0 ;
218 static int Knob_PreSpin = 10 ; // 20-100 likely better
219 static int Knob_ResetEvent = 0 ;
220 static int BackOffMask = 0 ;
221
222 static int Knob_FastHSSEC = 0 ;
223 static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee
224 static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline
225 static volatile int InitDone = 0 ;
226
227
228 // hashCode() generation :
229 //
230 // Possibilities:
231 // * MD5Digest of {obj,stwRandom}
232 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
233 // * A DES- or AES-style SBox[] mechanism
234 // * One of the Phi-based schemes, such as:
235 // 2654435761 = 2^32 * Phi (golden ratio)
236 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
237 // * A variation of Marsaglia's shift-xor RNG scheme.
238 // * (obj ^ stwRandom) is appealing, but can result
239 // in undesirable regularity in the hashCode values of adjacent objects
240 // (objects allocated back-to-back, in particular). This could potentially
241 // result in hashtable collisions and reduced hashtable efficiency.
242 // There are simple ways to "diffuse" the middle address bits over the
243 // generated hashCode values:
244 //
245
246 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
247 intptr_t value = 0 ;
248 if (hashCode == 0) {
249 // This form uses an unguarded global Park-Miller RNG,
250 // so it's possible for two threads to race and generate the same RNG.
251 // On MP system we'll have lots of RW access to a global, so the
252 // mechanism induces lots of coherency traffic.
253 value = os::random() ;
254 } else
255 if (hashCode == 1) {
256 // This variation has the property of being stable (idempotent)
257 // between STW operations. This can be useful in some of the 1-0
258 // synchronization schemes.
259 intptr_t addrBits = intptr_t(obj) >> 3 ;
260 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
261 } else
262 if (hashCode == 2) {
263 value = 1 ; // for sensitivity testing
264 } else
265 if (hashCode == 3) {
266 value = ++GVars.hcSequence ;
267 } else
268 if (hashCode == 4) {
269 value = intptr_t(obj) ;
270 } else {
271 // Marsaglia's xor-shift scheme with thread-specific state
272 // This is probably the best overall implementation -- we'll
273 // likely make this the default in future releases.
274 unsigned t = Self->_hashStateX ;
275 t ^= (t << 11) ;
276 Self->_hashStateX = Self->_hashStateY ;
277 Self->_hashStateY = Self->_hashStateZ ;
278 Self->_hashStateZ = Self->_hashStateW ;
279 unsigned v = Self->_hashStateW ;
280 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
281 Self->_hashStateW = v ;
282 value = v ;
283 }
284
285 value &= markOopDesc::hash_mask;
286 if (value == 0) value = 0xBAD ;
287 assert (value != markOopDesc::no_hash, "invariant") ;
288 TEVENT (hashCode: GENERATE) ;
289 return value;
290 }
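
The final branch above is Marsaglia's xorshift128 generator. A self-contained version, with placeholder seeds (the VM seeds the per-thread _hashState* fields elsewhere), looks like this:

    // Standalone xorshift128, matching the shift constants (11, 19, 8) used above.
    #include <cstdint>
    #include <cstdio>

    struct HashState { uint32_t x = 842502087u, y = 2463534242u, z = 273326509u, w = 88675123u; };

    static uint32_t next_hash(HashState& s) {
      uint32_t t = s.x;
      t ^= t << 11;
      s.x = s.y; s.y = s.z; s.z = s.w;
      s.w = (s.w ^ (s.w >> 19)) ^ (t ^ (t >> 8));
      return s.w;
    }

    int main() {
      HashState per_thread;                          // one instance per thread in the real scheme
      for (int i = 0; i < 4; i++) std::printf("%u\n", next_hash(per_thread));
      return 0;
    }
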
291
292 void BasicLock::print_on(outputStream* st) const {
293 st->print("monitor");
294 }
295
296 void BasicLock::move_to(oop obj, BasicLock* dest) {
297 // Check to see if we need to inflate the lock. This is only needed
298 // if an object is locked using "this" lightweight monitor. In that
299 // case, the displaced_header() is unlocked, because the
300 // displaced_header() contains the header for the originally unlocked
301 // object. However, the object could have already been inflated. But it
302 // does not matter: the inflation will just be a no-op. For other cases,
303 // the displaced header will be either 0x0 or 0x3, which are location
304 // independent, therefore the BasicLock is free to move.
305 //
306 // During OSR we may need to relocate a BasicLock (which contains a
307 // displaced word) from a location in an interpreter frame to a
308 // new location in a compiled frame. "this" refers to the source
309 // basiclock in the interpreter frame. "dest" refers to the destination
310 // basiclock in the new compiled frame. We *always* inflate in move_to().
311 // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
312 // cause performance problems in code that makes heavy use of a small # of
313 // uncontended locks. (We'd inflate during OSR, and then sync performance
314 // would subsequently plummet because the thread would be forced thru the slow-path).
315 // This problem has been made largely moot on IA32 by inlining the inflated fast-path
316 // operations in Fast_Lock and Fast_Unlock in i486.ad.
317 //
318 // Note that there is a way to safely swing the object's markword from
319 // one stack location to another. This avoids inflation. Obviously,
320 // we need to ensure that both locations refer to the current thread's stack.
321 // There are some subtle concurrency issues, however, and since the benefit
322 // is small (given the support for inflated fast-path locking in the fast_lock, etc)
323 // we'll leave that optimization for another time.
324
325 if (displaced_header()->is_neutral()) {
326 ObjectSynchronizer::inflate_helper(obj);
327 // WARNING: We cannot put a check here, because the inflation
328 // will not update the displaced header. Once BasicLock is inflated,
329 // no one should ever look at its content.
330 } else {
331 // Typically the displaced header will be 0 (recursive stack lock) or
332 // unused_mark. Naively we'd like to assert that the displaced mark
333 // value is either 0, neutral, or 3. But with the advent of the
334 // store-before-CAS avoidance in fast_lock/compiler_lock_object
335 // we can find any flavor mark in the displaced mark.
336 }
337 // [RGV] The next line appears to do nothing!
338 intptr_t dh = (intptr_t) displaced_header();
339 dest->set_displaced_header(displaced_header());
340 }
341
342 // -----------------------------------------------------------------------------
343
344 // standard constructor, allows locking failures
345 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
346 _dolock = doLock;
347 _thread = thread;
348 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
349 _obj = obj;
350
351 if (_dolock) {
352 TEVENT (ObjectLocker) ;
353
354 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
355 }
356 }
357
358 ObjectLocker::~ObjectLocker() {
359 if (_dolock) {
360 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
361 }
362 }
363
364 // -----------------------------------------------------------------------------
365
366
367 PerfCounter * ObjectSynchronizer::_sync_Inflations = NULL ;
368 PerfCounter * ObjectSynchronizer::_sync_Deflations = NULL ;
369 PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ;
370 PerfCounter * ObjectSynchronizer::_sync_FutileWakeups = NULL ;
371 PerfCounter * ObjectSynchronizer::_sync_Parks = NULL ;
372 PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications = NULL ;
373 PerfCounter * ObjectSynchronizer::_sync_Notifications = NULL ;
374 PerfCounter * ObjectSynchronizer::_sync_PrivateA = NULL ;
375 PerfCounter * ObjectSynchronizer::_sync_PrivateB = NULL ;
376 PerfCounter * ObjectSynchronizer::_sync_SlowExit = NULL ;
377 PerfCounter * ObjectSynchronizer::_sync_SlowEnter = NULL ;
378 PerfCounter * ObjectSynchronizer::_sync_SlowNotify = NULL ;
379 PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll = NULL ;
380 PerfCounter * ObjectSynchronizer::_sync_FailedSpins = NULL ;
381 PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins = NULL ;
382 PerfCounter * ObjectSynchronizer::_sync_MonInCirculation = NULL ;
383 PerfCounter * ObjectSynchronizer::_sync_MonScavenged = NULL ;
384 PerfLongVariable * ObjectSynchronizer::_sync_MonExtant = NULL ;
385
386 // One-shot global initialization for the sync subsystem.
387 // We could also defer initialization and initialize on-demand
388 // the first time we call inflate(). Initialization would
389 // be protected - like so many things - by the MonitorCache_lock.
390
391 void ObjectSynchronizer::Initialize () {
392 static int InitializationCompleted = 0 ;
393 assert (InitializationCompleted == 0, "invariant") ;
394 InitializationCompleted = 1 ;
395 if (UsePerfData) {
396 EXCEPTION_MARK ;
397 #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
398 #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
399 NEWPERFCOUNTER(_sync_Inflations) ;
400 NEWPERFCOUNTER(_sync_Deflations) ;
401 NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
402 NEWPERFCOUNTER(_sync_FutileWakeups) ;
403 NEWPERFCOUNTER(_sync_Parks) ;
404 NEWPERFCOUNTER(_sync_EmptyNotifications) ;
405 NEWPERFCOUNTER(_sync_Notifications) ;
406 NEWPERFCOUNTER(_sync_SlowEnter) ;
407 NEWPERFCOUNTER(_sync_SlowExit) ;
408 NEWPERFCOUNTER(_sync_SlowNotify) ;
409 NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
410 NEWPERFCOUNTER(_sync_FailedSpins) ;
411 NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
412 NEWPERFCOUNTER(_sync_PrivateA) ;
413 NEWPERFCOUNTER(_sync_PrivateB) ;
414 NEWPERFCOUNTER(_sync_MonInCirculation) ;
415 NEWPERFCOUNTER(_sync_MonScavenged) ;
416 NEWPERFVARIABLE(_sync_MonExtant) ;
417 #undef NEWPERFCOUNTER
418 }
419 }
420
421 // Compile-time asserts
422 // When possible, it's better to catch errors deterministically at
423 // compile-time than at runtime. The down-side to using compile-time
424 // asserts is that the error message -- often something about negative array
425 // indices -- is opaque.
426
427 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
428
429 void ObjectMonitor::ctAsserts() {
430 CTASSERT(offset_of (ObjectMonitor, _header) == 0);
431 }
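
The CTASSERT trick makes a failing condition produce a negative array size, which is ill-formed at compile time. A minimal illustration, together with the static_assert that later C++ standards provide (Example is an invented struct):

    #include <cstddef>

    struct Example { long header; long owner; };

    // Fails to compile (negative array size) if the condition is false:
    typedef char header_must_be_first[(offsetof(Example, header) == 0) ? 1 : -1];

    // The same check with a readable message, in C++11 and later:
    static_assert(offsetof(Example, header) == 0, "header must be the first field");
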
432
433 static int Adjust (volatile int * adr, int dx) {
434 int v ;
435 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
436 return v ;
437 }
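
Adjust() is an atomic add that returns the previous value. With <atomic> the same operation is fetch_add; a hand-rolled CAS-loop equivalent is shown for comparison (a sketch, not the VM's Atomic class):

    #include <atomic>

    static int adjust(std::atomic<int>& counter, int dx) {
      return counter.fetch_add(dx, std::memory_order_relaxed);       // returns the old value
    }

    // The same thing spelled out as a CAS loop, mirroring Adjust() above:
    static int adjust_cas(std::atomic<int>& counter, int dx) {
      int v = counter.load(std::memory_order_relaxed);
      while (!counter.compare_exchange_weak(v, v + dx, std::memory_order_relaxed)) {
        // on failure, v is reloaded with the current value; retry
      }
      return v;
    }
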
438
439 // Ad-hoc mutual exclusion primitives: SpinLock and Mux
440 //
441 // We employ SpinLocks _only for low-contention, fixed-length
442 // short-duration critical sections where we're concerned
443 // about native mutex_t or HotSpot Mutex:: latency.
444 // The mux construct provides a spin-then-block mutual exclusion
445 // mechanism.
446 //
447 // Testing has shown that contention on the ListLock guarding gFreeList
448 // is common. If we implement ListLock as a simple SpinLock it's common
449 // for the JVM to devolve to yielding with little progress. This is true
450 // despite the fact that the critical sections protected by ListLock are
451 // extremely short.
452 //
453 // TODO-FIXME: ListLock should be of type SpinLock.
454 // We should make this a 1st-class type, integrated into the lock
455 // hierarchy as leaf-locks. Critically, the SpinLock structure
456 // should have sufficient padding to avoid false-sharing and excessive
457 // cache-coherency traffic.
458
459
460 typedef volatile int SpinLockT ;
461
462 void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
463 if (Atomic::cmpxchg (1, adr, 0) == 0) {
464 return ; // normal fast-path return
465 }
466
467 // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
468 TEVENT (SpinAcquire - ctx) ;
469 int ctr = 0 ;
470 int Yields = 0 ;
471 for (;;) {
472 while (*adr != 0) {
473 ++ctr ;
474 if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
475 if (Yields > 5) {
476 // Consider using a simple NakedSleep() instead.
477 // Then SpinAcquire could be called by non-JVM threads
478 Thread::current()->_ParkEvent->park(1) ;
479 } else {
480 os::NakedYield() ;
481 ++Yields ;
482 }
483 } else {
484 SpinPause() ;
485 }
486 }
487 if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
488 }
489 }
490
491 void Thread::SpinRelease (volatile int * adr) {
492 assert (*adr != 0, "invariant") ;
493 OrderAccess::fence() ; // guarantee at least release consistency.
494 // Roach-motel semantics.
495 // It's safe if subsequent LDs and STs float "up" into the critical section,
496 // but prior LDs and STs within the critical section can't be allowed
497 // to reorder or float past the ST that releases the lock.
498 *adr = 0 ;
499 }
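
A portable sketch of the SpinAcquire/SpinRelease shape above: CAS for the fast path, then a test-and-test-and-set loop that yields and eventually sleeps instead of burning the CPU. The thresholds and the sleep stand in for the ParkEvent-based back-off; this is not the VM's implementation.

    #include <atomic>
    #include <chrono>
    #include <thread>

    static void spin_acquire(std::atomic<int>& lock) {
      int expected = 0;
      if (lock.compare_exchange_strong(expected, 1, std::memory_order_acquire)) return;

      int ctr = 0, yields = 0;
      for (;;) {
        while (lock.load(std::memory_order_relaxed) != 0) {      // wait until it looks free
          if ((++ctr & 0xFFF) == 0) {
            if (yields > 5) std::this_thread::sleep_for(std::chrono::milliseconds(1));
            else { std::this_thread::yield(); ++yields; }
          }
          // a real implementation would issue a PAUSE/SpinPause() hint here
        }
        expected = 0;
        if (lock.compare_exchange_strong(expected, 1, std::memory_order_acquire)) return;
      }
    }

    static void spin_release(std::atomic<int>& lock) {
      lock.store(0, std::memory_order_release);   // prior critical-section stores stay before this
    }
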
500
501 // muxAcquire and muxRelease:
502 //
503 // * muxAcquire and muxRelease support a single-word lock-word construct.
504 // The LSB of the word is set IFF the lock is held.
505 // The remainder of the word points to the head of a singly-linked list
506 // of threads blocked on the lock.
507 //
508 // * The current implementation of muxAcquire-muxRelease uses its own
509 // dedicated Thread._MuxEvent instance. If we're interested in
510 // minimizing the peak number of extant ParkEvent instances then
511 // we could eliminate _MuxEvent and "borrow" _ParkEvent as long
512 // as certain invariants were satisfied. Specifically, care would need
513 // to be taken with regards to consuming unpark() "permits".
514 // A safe rule of thumb is that a thread would never call muxAcquire()
515 // if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
516 // park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
517 // consume an unpark() permit intended for monitorenter, for instance.
518 // One way around this would be to widen the restricted-range semaphore
519 // implemented in park(). Another alternative would be to provide
520 // multiple instances of the PlatformEvent() for each thread. One
521 // instance would be dedicated to muxAcquire-muxRelease, for instance.
522 //
523 // * Usage:
524 // -- Only as leaf locks
525 // -- for short-term locking only as muxAcquire does not perform
526 // thread state transitions.
527 //
528 // Alternatives:
529 // * We could implement muxAcquire and muxRelease with MCS or CLH locks
530 // but with parking or spin-then-park instead of pure spinning.
531 // * Use Taura-Oyama-Yonenzawa locks.
532 // * It's possible to construct a 1-0 lock if we encode the lockword as
533 // (List,LockByte). Acquire will CAS the full lockword while Release
534 // will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
535 // acquiring threads use timers (ParkTimed) to detect and recover from
536 // the stranding window. Thread/Node structures must be aligned on 256-byte
537 // boundaries by using placement-new.
538 // * Augment MCS with advisory back-link fields maintained with CAS().
539 // Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
540 // The validity of the backlinks must be ratified before we trust the value.
541 // If the backlinks are invalid the exiting thread must back-track through the
542 // the forward links, which are always trustworthy.
543 // * Add a successor indication. The LockWord is currently encoded as
544 // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
545 // to provide the usual futile-wakeup optimization.
546 // See RTStt for details.
547 // * Consider schedctl.sc_nopreempt to cover the critical section.
548 //
549
550
551 typedef volatile intptr_t MutexT ; // Mux Lock-word
552 enum MuxBits { LOCKBIT = 1 } ;
553
554 void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
555 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
556 if (w == 0) return ;
557 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
558 return ;
559 }
560
561 TEVENT (muxAcquire - Contention) ;
562 ParkEvent * const Self = Thread::current()->_MuxEvent ;
563 assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
564 for (;;) {
565 int its = (os::is_MP() ? 100 : 0) + 1 ;
566
567 // Optional spin phase: spin-then-park strategy
568 while (--its >= 0) {
569 w = *Lock ;
570 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
571 return ;
572 }
573 }
574
575 Self->reset() ;
576 Self->OnList = intptr_t(Lock) ;
577 // The following fence() isn't _strictly necessary as the subsequent
578 // CAS() both serializes execution and ratifies the fetched *Lock value.
579 OrderAccess::fence();
580 for (;;) {
581 w = *Lock ;
582 if ((w & LOCKBIT) == 0) {
583 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
584 Self->OnList = 0 ; // hygiene - allows stronger asserts
585 return ;
586 }
587 continue ; // Interference -- *Lock changed -- Just retry
588 }
589 assert (w & LOCKBIT, "invariant") ;
590 Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
591 if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
592 }
593
594 while (Self->OnList != 0) {
595 Self->park() ;
596 }
597 }
598 }
599
600 void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
601 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
602 if (w == 0) return ;
603 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
604 return ;
605 }
606
607 TEVENT (muxAcquire - Contention) ;
608 ParkEvent * ReleaseAfter = NULL ;
609 if (ev == NULL) {
610 ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
611 }
612 assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
613 for (;;) {
614 guarantee (ev->OnList == 0, "invariant") ;
615 int its = (os::is_MP() ? 100 : 0) + 1 ;
616
617 // Optional spin phase: spin-then-park strategy
618 while (--its >= 0) {
619 w = *Lock ;
620 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
621 if (ReleaseAfter != NULL) {
622 ParkEvent::Release (ReleaseAfter) ;
623 }
624 return ;
625 }
626 }
627
628 ev->reset() ;
629 ev->OnList = intptr_t(Lock) ;
630 // The following fence() isn't _strictly necessary as the subsequent
631 // CAS() both serializes execution and ratifies the fetched *Lock value.
632 OrderAccess::fence();
633 for (;;) {
634 w = *Lock ;
635 if ((w & LOCKBIT) == 0) {
636 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
637 ev->OnList = 0 ;
638 // We call ::Release while holding the outer lock, thus
639 // artificially lengthening the critical section.
640 // Consider deferring the ::Release() until the subsequent unlock(),
641 // after we've dropped the outer lock.
642 if (ReleaseAfter != NULL) {
643 ParkEvent::Release (ReleaseAfter) ;
644 }
645 return ;
646 }
647 continue ; // Interference -- *Lock changed -- Just retry
648 }
649 assert (w & LOCKBIT, "invariant") ;
650 ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
651 if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
652 }
653
654 while (ev->OnList != 0) {
655 ev->park() ;
656 }
657 }
658 }
659
660 // Release() must extract a successor from the list and then wake that thread.
661 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
662 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based
663 // Release() would :
664 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
665 // (B) Extract a successor from the private list "in-hand"
666 // (C) attempt to CAS() the residual back into *Lock over null.
667 // If there were any newly arrived threads then the CAS() would fail.
668 // In that case Release() would detach the RATs, re-merge the list in-hand
669 // with the RATs and repeat as needed. Alternately, Release() might
670 // detach and extract a successor, but then pass the residual list to the wakee.
671 // The wakee would be responsible for reattaching and remerging before it
672 // competed for the lock.
673 //
674 // Both "pop" and DMR are immune from ABA corruption -- there can be
675 // multiple concurrent pushers, but only one popper or detacher.
676 // This implementation pops from the head of the list. This is unfair,
677 // but tends to provide excellent throughput as hot threads remain hot.
678 // (We wake recently run threads first).
679
680 void Thread::muxRelease (volatile intptr_t * Lock) {
681 for (;;) {
682 const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
683 assert (w & LOCKBIT, "invariant") ;
684 if (w == LOCKBIT) return ;
685 ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
686 assert (List != NULL, "invariant") ;
687 assert (List->OnList == intptr_t(Lock), "invariant") ;
688 ParkEvent * nxt = List->ListNext ;
689
690 // The following CAS() releases the lock and pops the head element.
691 if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
692 continue ;
693 }
694 List->OnList = 0 ;
695 OrderAccess::fence() ;
696 List->unpark () ;
697 return ;
698 }
699 }
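
The lock-word layout described at the top of this block (LSB = lock bit, upper bits = head of a singly-linked waiter list) can be sketched compactly with std::atomic. Parking is faked with a yield loop on a per-node flag, standing in for the real ParkEvent; everything here is illustrative, not the VM's muxAcquire/muxRelease.

    #include <atomic>
    #include <cstdint>
    #include <thread>

    struct WaitNode {                         // node addresses are aligned, so the LSB is free
      std::atomic<int> parked{0};
      WaitNode*        next = nullptr;
    };

    static const intptr_t LOCKBIT = 1;

    static void mux_acquire(std::atomic<intptr_t>& lock, WaitNode* self) {
      for (;;) {
        intptr_t w = lock.load(std::memory_order_relaxed);
        if ((w & LOCKBIT) == 0) {                          // looks free: try to set the lock bit
          if (lock.compare_exchange_weak(w, w | LOCKBIT, std::memory_order_acquire)) return;
          continue;
        }
        self->parked.store(1, std::memory_order_relaxed);
        self->next = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);  // push self onto the waiter stack
        if (lock.compare_exchange_weak(w, reinterpret_cast<intptr_t>(self) | LOCKBIT,
                                       std::memory_order_release)) {
          while (self->parked.load(std::memory_order_acquire) != 0) std::this_thread::yield();
          // woken by the releaser: loop and compete for the lock again
        }
      }
    }

    // Precondition: the caller holds the lock (LOCKBIT is set).
    static void mux_release(std::atomic<intptr_t>& lock) {
      for (;;) {
        intptr_t w = lock.load(std::memory_order_acquire);
        if (w == LOCKBIT) {                                // no waiters: just clear the lock bit
          if (lock.compare_exchange_weak(w, 0, std::memory_order_release)) return;
          continue;
        }
        WaitNode* head = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
        // Pop the head and release the lock in one CAS, then wake the popped waiter.
        if (lock.compare_exchange_weak(w, reinterpret_cast<intptr_t>(head->next),
                                       std::memory_order_acq_rel)) {
          head->parked.store(0, std::memory_order_release);
          return;
        }
      }
    }
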
700
701 // ObjectMonitor Lifecycle
702 // -----------------------
703 // Inflation unlinks monitors from the global gFreeList and
704 // associates them with objects. Deflation -- which occurs at
705 // STW-time -- disassociates idle monitors from objects. Such
706 // scavenged monitors are returned to the gFreeList.
707 //
708 // The global list is protected by ListLock. All the critical sections
709 // are short and operate in constant-time.
710 //
711 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
712 //
713 // Lifecycle:
714 // -- unassigned and on the global free list
715 // -- unassigned and on a thread's private omFreeList
716 // -- assigned to an object. The object is inflated and the mark refers
717 // to the objectmonitor.
718 //
719 // TODO-FIXME:
720 //
721 // * We currently protect the gFreeList with a simple lock.
722 // An alternate lock-free scheme would be to pop elements from the gFreeList
723 // with CAS. This would be safe from ABA corruption as long we only
724 // recycled previously appearing elements onto the list in deflate_idle_monitors()
725 // at STW-time. Completely new elements could always be pushed onto the gFreeList
726 // with CAS. Elements that appeared previously on the list could only
727 // be installed at STW-time.
728 //
729 // * For efficiency and to help reduce the store-before-CAS penalty
730 // the objectmonitors on gFreeList or local free lists should be ready to install
731 // with the exception of _header and _object. _object can be set after inflation.
732 // In particular, keep all objectMonitors on a thread's private list in ready-to-install
733 // state with m.Owner set properly.
734 //
735 // * We could also diffuse contention by using multiple global (FreeList, Lock)
736 // pairs -- threads could use trylock() and a cyclic-scan strategy to search for
737 // an unlocked free list.
738 //
739 // * Add lifecycle tags and assert()s.
740 //
741 // * Be more consistent about when we clear an objectmonitor's fields:
742 // A. After extracting the objectmonitor from a free list.
743 // B. After adding an objectmonitor to a free list.
744 //
745
746 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
747 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
748 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
749 #define CHAINMARKER ((oop)-1)
750
751 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
752 // A large MAXPRIVATE value reduces both list lock contention
753 // and list coherency traffic, but also tends to increase the
754 // number of objectMonitors in circulation as well as the STW
755 // scavenge costs. As usual, we lean toward time in space-time
756 // tradeoffs.
757 const int MAXPRIVATE = 1024 ;
758 for (;;) {
759 ObjectMonitor * m ;
760
761 // 1: try to allocate from the thread's local omFreeList.
762 // Threads will attempt to allocate first from their local list, then
763 // from the global list, and only after those attempts fail will the thread
764 // attempt to instantiate new monitors. Thread-local free lists take
765 // heat off the ListLock and improve allocation latency, as well as reducing
766 // coherency traffic on the shared global list.
767 m = Self->omFreeList ;
768 if (m != NULL) {
769 Self->omFreeList = m->FreeNext ;
770 Self->omFreeCount -- ;
771 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
772 guarantee (m->object() == NULL, "invariant") ;
773 return m ;
774 }
775
776 // 2: try to allocate from the global gFreeList
777 // CONSIDER: use muxTry() instead of muxAcquire().
778 // If the muxTry() fails then drop immediately into case 3.
779 // If we're using thread-local free lists then try
780 // to reprovision the caller's free list.
781 if (gFreeList != NULL) {
782 // Reprovision the thread's omFreeList.
783 // Use bulk transfers to reduce the allocation rate and heat
784 // on various locks.
785 Thread::muxAcquire (&ListLock, "omAlloc") ;
786 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
787 ObjectMonitor * take = gFreeList ;
788 gFreeList = take->FreeNext ;
789 guarantee (take->object() == NULL, "invariant") ;
790 guarantee (!take->is_busy(), "invariant") ;
791 take->Recycle() ;
792 omRelease (Self, take) ;
793 }
794 Thread::muxRelease (&ListLock) ;
795 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
796 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
797 TEVENT (omFirst - reprovision) ;
798 continue ;
799 }
800
801 // 3: allocate a block of new ObjectMonitors
802 // Both the local and global free lists are empty -- resort to malloc().
803 // In the current implementation objectMonitors are TSM - immortal.
804 assert (_BLOCKSIZE > 1, "invariant") ;
805 ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
806
807 // NOTE: (almost) no way to recover if allocation failed.
808 // We might be able to induce a STW safepoint and scavenge enough
809 // objectMonitors to permit progress.
810 if (temp == NULL) {
811 vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
812 }
813
814 // Format the block.
815 // Initialize the linked list; each monitor points to its next,
816 // forming the singly linked free list. The very first monitor
817 // will point to the next block, which forms the block list.
818 // The trick of using the 1st element in the block as gBlockList
819 // linkage should be reconsidered. A better implementation would
820 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
821
822 for (int i = 1; i < _BLOCKSIZE ; i++) {
823 temp[i].FreeNext = &temp[i+1];
824 }
825
826 // terminate the last monitor as the end of list
827 temp[_BLOCKSIZE - 1].FreeNext = NULL ;
828
829 // Element [0] is reserved for global list linkage
830 temp[0].set_object(CHAINMARKER);
831
832 // Consider carving out this thread's current request from the
833 // block in hand. This avoids some lock traffic and redundant
834 // list activity.
835
836 // Acquire the ListLock to manipulate BlockList and FreeList.
837 // An Oyama-Taura-Yonezawa scheme might be more efficient.
838 Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
839
840 // Add the new block to the list of extant blocks (gBlockList).
841 // The very first objectMonitor in a block is reserved and dedicated.
842 // It serves as blocklist "next" linkage.
843 temp[0].FreeNext = gBlockList;
844 gBlockList = temp;
845
846 // Add the new string of objectMonitors to the global free list
847 temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
848 gFreeList = temp + 1;
849 Thread::muxRelease (&ListLock) ;
850 TEVENT (Allocate block of monitors) ;
851 }
852 }
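
A portable sketch of the allocation strategy in omAlloc(): consume a thread-local free list, refill it in bulk from a mutex-protected global list, and only then carve a fresh block. Node, the block size, and the provision value are invented stand-ins for objectMonitor, _BLOCKSIZE, and omFreeProvision.

    #include <mutex>

    struct Node { Node* free_next = nullptr; };

    static Node*      g_free_list = nullptr;          // like gFreeList
    static std::mutex g_list_lock;                    // like ListLock
    static thread_local Node* t_free_list = nullptr;  // like omFreeList
    static thread_local int   t_provision = 32;       // like omFreeProvision

    static Node* om_alloc() {
      for (;;) {
        if (t_free_list != nullptr) {                 // 1: thread-local list -- no locking
          Node* m = t_free_list;
          t_free_list = m->free_next;
          return m;
        }
        {                                             // 2: bulk refill from the global list
          std::lock_guard<std::mutex> guard(g_list_lock);
          for (int i = t_provision; --i >= 0 && g_free_list != nullptr; ) {
            Node* take = g_free_list;
            g_free_list = take->free_next;
            take->free_next = t_free_list;            // push onto the private list
            t_free_list = take;
          }
        }
        if (t_free_list != nullptr) {
          t_provision += 1 + t_provision / 2;         // provision more aggressively next time
          continue;
        }
        // 3: both lists empty -- carve a new block and seed the private list.
        // (The real code reserves element 0 of each block for gBlockList linkage;
        // the block is never freed, mirroring the type-stable-memory design.)
        Node* block = new Node[64];
        for (int i = 0; i < 64; i++) {
          block[i].free_next = t_free_list;
          t_free_list = &block[i];
        }
      }
    }
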
853
854 // Place "m" on the caller's private per-thread omFreeList.
855 // In practice there's no need to clamp or limit the number of
856 // monitors on a thread's omFreeList as the only time we'll call
857 // omRelease is to return a monitor to the free list after a CAS
858 // attempt failed. This doesn't allow unbounded #s of monitors to
859 // accumulate on a thread's free list.
860 //
861 // In the future the usage of omRelease() might change and monitors
862 // could migrate between free lists. In that case to avoid excessive
863 // accumulation we could limit omCount to (omProvision*2), otherwise return
864 // the objectMonitor to the global list. We should drain (return) in reasonable chunks.
865 // That is, *not* one-at-a-time.
866
867
868 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
869 guarantee (m->object() == NULL, "invariant") ;
870 m->FreeNext = Self->omFreeList ;
871 Self->omFreeList = m ;
872 Self->omFreeCount ++ ;
873 }
874
875 // Return the monitors of a moribund thread's local free list to
876 // the global free list. Typically a thread calls omFlush() when
877 // it's dying. We could also consider having the VM thread steal
878 // monitors from threads that have not run java code over a few
879 // consecutive STW safepoints. Relatedly, we might decay
880 // omFreeProvision at STW safepoints.
881 //
882 // We currently call omFlush() from the Thread:: dtor _after the thread
883 // has been excised from the thread list and is no longer a mutator.
884 // That means that omFlush() can run concurrently with a safepoint and
885 // the scavenge operator. Calling omFlush() from JavaThread::exit() might
886 // be a better choice as we could safely reason that the JVM is
887 // not at a safepoint at the time of the call, and thus there could
888 // be no inopportune interleavings between omFlush() and the scavenge
889 // operator.
890
891 void ObjectSynchronizer::omFlush (Thread * Self) {
892 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
893 Self->omFreeList = NULL ;
894 if (List == NULL) return ;
895 ObjectMonitor * Tail = NULL ;
896 ObjectMonitor * s ;
897 for (s = List ; s != NULL ; s = s->FreeNext) {
898 Tail = s ;
899 guarantee (s->object() == NULL, "invariant") ;
900 guarantee (!s->is_busy(), "invariant") ;
901 s->set_owner (NULL) ; // redundant but good hygiene
902 TEVENT (omFlush - Move one) ;
903 }
904
905 guarantee (Tail != NULL && List != NULL, "invariant") ;
906 Thread::muxAcquire (&ListLock, "omFlush") ;
907 Tail->FreeNext = gFreeList ;
908 gFreeList = List ;
909 Thread::muxRelease (&ListLock) ;
910 TEVENT (omFlush) ;
911 }
912
913
914 // Get the next block in the block list.
915 static inline ObjectMonitor* next(ObjectMonitor* block) {
916 assert(block->object() == CHAINMARKER, "must be a block header");
917 block = block->FreeNext ;
918 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
919 return block;
920 }
921
922 // Fast path code shared by multiple functions
923 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
924 markOop mark = obj->mark();
925 if (mark->has_monitor()) {
926 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
927 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
928 return mark->monitor();
929 }
930 return ObjectSynchronizer::inflate(Thread::current(), obj);
931 }
932
933 // Note that we could encounter some performance loss through false-sharing as
934 // multiple locks occupy the same $ line. Padding might be appropriate.
935
936 #define NINFLATIONLOCKS 256
937 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
938 416
939 417 static markOop ReadStableMark (oop obj) {
940 418 markOop mark = obj->mark() ;
941 419 if (!mark->is_being_inflated()) {
942 420 return mark ; // normal fast-path return
1002 480 SpinPause() ; // SMP-polite spinning
1003 481 }
1004 482 }
1005 483 }
1006 484
485 // hashCode() generation :
486 //
487 // Possibilities:
488 // * MD5Digest of {obj,stwRandom}
489 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
490 // * A DES- or AES-style SBox[] mechanism
491 // * One of the Phi-based schemes, such as:
492 // 2654435761 = 2^32 * Phi (golden ratio)
493 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
494 // * A variation of Marsaglia's shift-xor RNG scheme.
495 // * (obj ^ stwRandom) is appealing, but can result
496 // in undesirable regularity in the hashCode values of adjacent objects
497 // (objects allocated back-to-back, in particular). This could potentially
498 // result in hashtable collisions and reduced hashtable efficiency.
499 // There are simple ways to "diffuse" the middle address bits over the
500 // generated hashCode values:
501 //
502
503 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
504 intptr_t value = 0 ;
505 if (hashCode == 0) {
506 // This form uses an unguarded global Park-Miller RNG,
507 // so it's possible for two threads to race and generate the same RNG.
508 // On MP system we'll have lots of RW access to a global, so the
509 // mechanism induces lots of coherency traffic.
510 value = os::random() ;
511 } else
512 if (hashCode == 1) {
513 // This variation has the property of being stable (idempotent)
514 // between STW operations. This can be useful in some of the 1-0
515 // synchronization schemes.
516 intptr_t addrBits = intptr_t(obj) >> 3 ;
517 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
518 } else
519 if (hashCode == 2) {
520 value = 1 ; // for sensitivity testing
521 } else
522 if (hashCode == 3) {
523 value = ++GVars.hcSequence ;
524 } else
525 if (hashCode == 4) {
526 value = intptr_t(obj) ;
527 } else {
528 // Marsaglia's xor-shift scheme with thread-specific state
529 // This is probably the best overall implementation -- we'll
530 // likely make this the default in future releases.
531 unsigned t = Self->_hashStateX ;
532 t ^= (t << 11) ;
533 Self->_hashStateX = Self->_hashStateY ;
534 Self->_hashStateY = Self->_hashStateZ ;
535 Self->_hashStateZ = Self->_hashStateW ;
536 unsigned v = Self->_hashStateW ;
537 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
538 Self->_hashStateW = v ;
539 value = v ;
540 }
541
542 value &= markOopDesc::hash_mask;
543 if (value == 0) value = 0xBAD ;
544 assert (value != markOopDesc::no_hash, "invariant") ;
545 TEVENT (hashCode: GENERATE) ;
546 return value;
547 }
548 //
549 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
550 if (UseBiasedLocking) {
551 // NOTE: many places throughout the JVM do not expect a safepoint
552 // to be taken here, in particular most operations on perm gen
553 // objects. However, we only ever bias Java instances and all of
554 // the call sites of identity_hash that might revoke biases have
555 // been checked to make sure they can handle a safepoint. The
556 // added check of the bias pattern is to avoid useless calls to
557 // thread-local storage.
558 if (obj->mark()->has_bias_pattern()) {
559 // Box and unbox the raw reference just in case we cause a STW safepoint.
560 Handle hobj (Self, obj) ;
561 // Relaxing assertion for bug 6320749.
562 assert (Universe::verify_in_progress() ||
563 !SafepointSynchronize::is_at_safepoint(),
564 "biases should not be seen by VM thread here");
565 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
566 obj = hobj() ;
567 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
568 }
569 }
570
571 // hashCode() is a heap mutator ...
572 // Relaxing assertion for bug 6320749.
573 assert (Universe::verify_in_progress() ||
574 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
575 assert (Universe::verify_in_progress() ||
576 Self->is_Java_thread() , "invariant") ;
577 assert (Universe::verify_in_progress() ||
578 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
579
580 ObjectMonitor* monitor = NULL;
581 markOop temp, test;
582 intptr_t hash;
583 markOop mark = ReadStableMark (obj);
584
585 // object should remain ineligible for biased locking
586 assert (!mark->has_bias_pattern(), "invariant") ;
587
588 if (mark->is_neutral()) {
589 hash = mark->hash(); // this is a normal header
590 if (hash) { // if it has hash, just return it
591 return hash;
592 }
593 hash = get_next_hash(Self, obj); // allocate a new hash code
594 temp = mark->copy_set_hash(hash); // merge the hash code into header
595 // use (machine word version) atomic operation to install the hash
596 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
597 if (test == mark) {
598 return hash;
599 }
600 // If the atomic operation failed, we must inflate the header
601 // into a heavyweight monitor. We could add more code here
602 // for the fast path, but it is not worth the complexity.
603 } else if (mark->has_monitor()) {
604 monitor = mark->monitor();
605 temp = monitor->header();
606 assert (temp->is_neutral(), "invariant") ;
607 hash = temp->hash();
608 if (hash) {
609 return hash;
610 }
611 // Skip to the following code to reduce code size
612 } else if (Self->is_lock_owned((address)mark->locker())) {
613 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
614 assert (temp->is_neutral(), "invariant") ;
615 hash = temp->hash(); // by current thread, check if the displaced
616 if (hash) { // header contains hash code
617 return hash;
618 }
619 // WARNING:
620 // The displaced header is strictly immutable.
621 // It can NOT be changed in ANY case. So we have
622 // to inflate the header into a heavyweight monitor
623 // even though the current thread owns the lock. The reason
624 // is that the BasicLock (stack slot) will be asynchronously
625 // read by other threads during the inflate() function.
626 // Any change to the stack may not propagate to other threads
627 // correctly.
628 }
629
630 // Inflate the monitor to set hash code
631 monitor = ObjectSynchronizer::inflate(Self, obj);
632 // Load displaced header and check it has hash code
633 mark = monitor->header();
634 assert (mark->is_neutral(), "invariant") ;
635 hash = mark->hash();
636 if (hash == 0) {
637 hash = get_next_hash(Self, obj);
638 temp = mark->copy_set_hash(hash); // merge hash code into header
639 assert (temp->is_neutral(), "invariant") ;
640 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
641 if (test != mark) {
642 // The only update to the header in the monitor (outside GC)
643 // is installing the hash code. If someone adds a new usage of
644 // the displaced header, please update this code.
645 hash = test->hash();
646 assert (test->is_neutral(), "invariant") ;
647 assert (hash != 0, "Trivial unexpected object/monitor header usage.");
648 }
649 }
650 // We finally get the hash
651 return hash;
652 }
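// A toy model of the "install the hash with a single CAS, otherwise let the
// caller fall back to inflation" pattern FastHashCode() uses on a neutral
// header above. The 'kToy*' header layout is invented purely for illustration;
// real mark words encode lock state, age and hash bits differently.
#include <atomic>
#include <cstdint>

static const uintptr_t kToyHashShift = 8;
static const uintptr_t kToyHashMask  = 0xFFFFFFu;

// Returns the hash already present, or publishes 'candidate' if the header is
// still unchanged. A failed CAS means another thread raced us (it may have
// locked the object rather than hashed it, in which case 0 comes back and the
// caller would inflate).
inline uintptr_t toy_install_hash(std::atomic<uintptr_t>* header, uintptr_t candidate) {
  uintptr_t mark = header->load(std::memory_order_acquire);
  uintptr_t existing = (mark >> kToyHashShift) & kToyHashMask;
  if (existing != 0) return existing;                        // already hashed
  uintptr_t updated = mark | ((candidate & kToyHashMask) << kToyHashShift);
  if (header->compare_exchange_strong(mark, updated)) {
    return candidate;                                        // we installed it
  }
  return (header->load() >> kToyHashShift) & kToyHashMask;   // someone else won
}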
653
654 // Deprecated -- use FastHashCode() instead.
655
656 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
657 return FastHashCode (Thread::current(), obj()) ;
658 }
659
660
661 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
662 Handle h_obj) {
663 if (UseBiasedLocking) {
664 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
665 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
666 }
667
668 assert(thread == JavaThread::current(), "Can only be called on current thread");
669 oop obj = h_obj();
670
671 markOop mark = ReadStableMark (obj) ;
672
673 // Uncontended case, header points to stack
674 if (mark->has_locker()) {
675 return thread->is_lock_owned((address)mark->locker());
676 }
677 // Contended case, header points to ObjectMonitor (tagged pointer)
678 if (mark->has_monitor()) {
679 ObjectMonitor* monitor = mark->monitor();
680 return monitor->is_entered(thread) != 0 ;
681 }
682 // Unlocked case, header in place
683 assert(mark->is_neutral(), "sanity check");
684 return false;
685 }
686
687 // Be aware that this method can revoke the bias of the lock object.
688 // It queries the ownership of the lock handle specified by 'h_obj'.
689 // If the current thread owns the lock, it returns owner_self. If no
690 // thread owns the lock, it returns owner_none. Otherwise, it returns
691 // owner_other.
692 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
693 (JavaThread *self, Handle h_obj) {
694 // The caller must beware this method can revoke bias, and
695 // revocation can result in a safepoint.
696 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
697 assert (self->thread_state() != _thread_blocked , "invariant") ;
698
699 // Possible mark states: neutral, biased, stack-locked, inflated
700
701 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
702 // CASE: biased
703 BiasedLocking::revoke_and_rebias(h_obj, false, self);
704 assert(!h_obj->mark()->has_bias_pattern(),
705 "biases should be revoked by now");
706 }
707
708 assert(self == JavaThread::current(), "Can only be called on current thread");
709 oop obj = h_obj();
710 markOop mark = ReadStableMark (obj) ;
711
712 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
713 if (mark->has_locker()) {
714 return self->is_lock_owned((address)mark->locker()) ?
715 owner_self : owner_other;
716 }
717
718 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
719 // The Object:ObjectMonitor relationship is stable as long as we're
720 // not at a safepoint.
721 if (mark->has_monitor()) {
722 void * owner = mark->monitor()->_owner ;
723 if (owner == NULL) return owner_none ;
724 return (owner == self ||
725 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
726 }
727
728 // CASE: neutral
729 assert(mark->is_neutral(), "sanity check");
730 return owner_none ; // it's unlocked
731 }
732
733 // FIXME: jvmti should call this
734 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
735 if (UseBiasedLocking) {
736 if (SafepointSynchronize::is_at_safepoint()) {
737 BiasedLocking::revoke_at_safepoint(h_obj);
738 } else {
739 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
740 }
741 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
742 }
743
744 oop obj = h_obj();
745 address owner = NULL;
746
747 markOop mark = ReadStableMark (obj) ;
748
749 // Uncontended case, header points to stack
750 if (mark->has_locker()) {
751 owner = (address) mark->locker();
752 }
753
754 // Contended case, header points to ObjectMonitor (tagged pointer)
755 if (mark->has_monitor()) {
756 ObjectMonitor* monitor = mark->monitor();
757 assert(monitor != NULL, "monitor should be non-null");
758 owner = (address) monitor->owner();
759 }
760
761 if (owner != NULL) {
762 return Threads::owning_thread_from_monitor_owner(owner, doLock);
763 }
764
765 // Unlocked case, header in place
766 // Cannot have assertion since this object may have been
767 // locked by another thread when reaching here.
768 // assert(mark->is_neutral(), "sanity check");
769
770 return NULL;
771 }
772 // Visitors ...
773
774 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
775 ObjectMonitor* block = gBlockList;
776 ObjectMonitor* mid;
777 while (block) {
778 assert(block->object() == CHAINMARKER, "must be a block header");
779 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
780 mid = block + i;
781 oop object = (oop) mid->object();
782 if (object != NULL) {
783 closure->do_monitor(mid);
784 }
785 }
786 block = (ObjectMonitor*) block->FreeNext;
787 }
788 }
789
790 // Get the next block in the block list.
791 static inline ObjectMonitor* next(ObjectMonitor* block) {
792 assert(block->object() == CHAINMARKER, "must be a block header");
793 block = block->FreeNext ;
794 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
795 return block;
796 }
797
798
799 void ObjectSynchronizer::oops_do(OopClosure* f) {
800 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
801 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
802 assert(block->object() == CHAINMARKER, "must be a block header");
803 for (int i = 1; i < _BLOCKSIZE; i++) {
804 ObjectMonitor* mid = &block[i];
805 if (mid->object() != NULL) {
806 f->do_oop((oop*)mid->object_addr());
807 }
808 }
809 }
810 }
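// A minimal model of the gBlockList layout the two visitors above walk:
// monitors are carved out of fixed-size blocks, element [0] of each block is
// reserved as the "next block" link, and elements [1.._BLOCKSIZE-1] are the
// usable monitors. 'ToyMonitor' and 'kBlockSize' are hypothetical stand-ins.
#include <cstddef>

struct ToyMonitor {
  void*       object;    // NULL while the monitor is on a free list
  ToyMonitor* FreeNext;  // free-list link, or block link in element [0]
};

static const int kBlockSize = 128;

template <typename Fn>
void for_each_in_use(ToyMonitor* block_list, Fn fn) {
  for (ToyMonitor* block = block_list; block != NULL;
       block = block[0].FreeNext) {            // element [0] chains the blocks
    for (int i = 1; i < kBlockSize; i++) {     // [0] is never a real monitor
      if (block[i].object != NULL) fn(&block[i]);
    }
  }
}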
811
812
813 // -----------------------------------------------------------------------------
814 // ObjectMonitor Lifecycle
815 // -----------------------
816 // Inflation unlinks monitors from the global gFreeList and
817 // associates them with objects. Deflation -- which occurs at
818 // STW-time -- disassociates idle monitors from objects. Such
819 // scavenged monitors are returned to the gFreeList.
820 //
821 // The global list is protected by ListLock. All the critical sections
822 // are short and operate in constant-time.
823 //
824 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
825 //
826 // Lifecycle:
827 // -- unassigned and on the global free list
828 // -- unassigned and on a thread's private omFreeList
829 // -- assigned to an object. The object is inflated and the mark refers
830 // to the objectmonitor.
831 //
832
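// The three lifecycle states above, written out as a hypothetical enum: a
// monitor only ever moves global-free -> thread-free -> in-use and back to a
// free list at scavenge/flush time, never back to the heap (TSM is immortal).
enum ToyMonitorState {
  OnGlobalFreeList,     // unassigned, reachable from gFreeList
  OnThreadFreeList,     // unassigned, reachable from a thread's omFreeList
  AssociatedWithObject  // the inflated object's mark word points at it
};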
833
834 // Constraining monitor pool growth via MonitorBound ...
835 //
836 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
837 // the rate of scavenging is driven primarily by GC. As such, we can find
838 // an inordinate number of monitors in circulation.
839 // To avoid that scenario we can artificially induce a STW safepoint
840 // if the pool appears to be growing past some reasonable bound.
841 // Generally we favor time in space-time tradeoffs, but as there's no
842 // natural back-pressure on the # of extant monitors we need to impose some
843 // type of limit. Beware that if MonitorBound is set to too low a value
844 // we could just loop. In addition, if MonitorBound is set to a low value
845 // we'll incur more safepoints, which are harmful to performance.
846 // See also: GuaranteedSafepointInterval
847 //
848 // The current implementation uses asynchronous VM operations.
849 //
850
851 static void InduceScavenge (Thread * Self, const char * Whence) {
852 // Induce STW safepoint to trim monitors
853 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
854 // More precisely, trigger an asynchronous STW safepoint as the number
855 // of active monitors passes the specified threshold.
856 // TODO: assert thread state is reasonable
857
858 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
859 if (ObjectMonitor::Knob_Verbose) {
860 ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
861 ::fflush(stdout) ;
862 }
863 // Induce a 'null' safepoint to scavenge monitors
864 // The VM_Operation instance must be heap allocated, as the op will be enqueued and posted
865 // to the VMThread and has a lifespan longer than that of this activation record.
866 // The VMThread will delete the op when completed.
867 VMThread::execute (new VM_ForceAsyncSafepoint()) ;
868
869 if (ObjectMonitor::Knob_Verbose) {
870 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
871 ::fflush(stdout) ;
872 }
873 }
874 }
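// The ForceMonitorScavenge check-then-xchg above is a one-shot latch: many
// threads may notice the pool has grown past the bound, but only the first to
// swap the flag from 0 to 1 posts the asynchronous safepoint (the scavenger
// later resets the flag). A stand-alone sketch with hypothetical names:
#include <atomic>

static std::atomic<int> scavenge_requested(0);

inline void request_scavenge_once(void (*post_async_safepoint)()) {
  // Cheap racy read first; the exchange makes the decision authoritative.
  if (scavenge_requested.load(std::memory_order_relaxed) == 0 &&
      scavenge_requested.exchange(1) == 0) {
    post_async_safepoint();   // exactly one requester reaches this point
  }
}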
875 /* Too slow for general assert or debug
876 void ObjectSynchronizer::verifyInUse (Thread *Self) {
877 ObjectMonitor* mid;
878 int inusetally = 0;
879 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
880 inusetally ++;
881 }
882 assert(inusetally == Self->omInUseCount, "inuse count off");
883
884 int freetally = 0;
885 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
886 freetally ++;
887 }
888 assert(freetally == Self->omFreeCount, "free count off");
889 }
890 */
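// The disabled verifyInUse() above is simply a tally-and-compare audit. A
// generic version of the same check, for any FreeNext-linked list paired with
// a cached count (template parameter names are hypothetical):
#include <cassert>

template <typename Node>
void audit_list_length(Node* head, int expected_count) {
  int tally = 0;
  for (Node* n = head; n != 0; n = n->FreeNext) tally++;
  assert(tally == expected_count && "list length and cached count disagree");
}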
891 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
892 // A large MAXPRIVATE value reduces both list lock contention
893 // and list coherency traffic, but also tends to increase the
894 // number of objectMonitors in circulation as well as the STW
895 // scavenge costs. As usual, we lean toward time in space-time
896 // tradeoffs.
897 const int MAXPRIVATE = 1024 ;
898 for (;;) {
899 ObjectMonitor * m ;
900
901 // 1: try to allocate from the thread's local omFreeList.
902 // Threads will attempt to allocate first from their local list, then
903 // from the global list, and only after those attempts fail will the thread
904 // attempt to instantiate new monitors. Thread-local free lists take
905 // heat off the ListLock and improve allocation latency, as well as reducing
906 // coherency traffic on the shared global list.
907 m = Self->omFreeList ;
908 if (m != NULL) {
909 Self->omFreeList = m->FreeNext ;
910 Self->omFreeCount -- ;
911 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
912 guarantee (m->object() == NULL, "invariant") ;
913 if (MonitorInUseLists) {
914 m->FreeNext = Self->omInUseList;
915 Self->omInUseList = m;
916 Self->omInUseCount ++;
917 // verifyInUse(Self);
918 } else {
919 m->FreeNext = NULL;
920 }
921 return m ;
922 }
923
924 // 2: try to allocate from the global gFreeList
925 // CONSIDER: use muxTry() instead of muxAcquire().
926 // If the muxTry() fails then drop immediately into case 3.
927 // If we're using thread-local free lists then try
928 // to reprovision the caller's free list.
929 if (gFreeList != NULL) {
930 // Reprovision the thread's omFreeList.
931 // Use bulk transfers to reduce the allocation rate and heat
932 // on various locks.
933 Thread::muxAcquire (&ListLock, "omAlloc") ;
934 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
935 MonitorFreeCount --;
936 ObjectMonitor * take = gFreeList ;
937 gFreeList = take->FreeNext ;
938 guarantee (take->object() == NULL, "invariant") ;
939 guarantee (!take->is_busy(), "invariant") ;
940 take->Recycle() ;
941 omRelease (Self, take, false) ;
942 }
943 Thread::muxRelease (&ListLock) ;
944 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
945 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
946 TEVENT (omFirst - reprovision) ;
947
948 const int mx = MonitorBound ;
949 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
950 // We can't safely induce a STW safepoint from omAlloc() as our thread
951 // state may not be appropriate for such activities and callers may hold
952 // naked oops, so instead we defer the action.
953 InduceScavenge (Self, "omAlloc") ;
954 }
955 continue;
956 }
957
958 // 3: allocate a block of new ObjectMonitors
959 // Both the local and global free lists are empty -- resort to malloc().
960 // In the current implementation objectMonitors are TSM - immortal.
961 assert (_BLOCKSIZE > 1, "invariant") ;
962 ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
963
964 // NOTE: (almost) no way to recover if allocation failed.
965 // We might be able to induce a STW safepoint and scavenge enough
966 // objectMonitors to permit progress.
967 if (temp == NULL) {
968 vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
969 }
970
971 // Format the block.
972 // Initialize the linked list: each monitor points to its successor,
973 // forming the singly linked free list. The very first monitor
974 // will point to the next block, which forms the block list.
975 // The trick of using the 1st element in the block as gBlockList
976 // linkage should be reconsidered. A better implementation would
977 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
978
979 for (int i = 1; i < _BLOCKSIZE ; i++) {
980 temp[i].FreeNext = &temp[i+1];
981 }
982
983 // terminate the last monitor as the end of list
984 temp[_BLOCKSIZE - 1].FreeNext = NULL ;
985
986 // Element [0] is reserved for global list linkage
987 temp[0].set_object(CHAINMARKER);
988
989 // Consider carving out this thread's current request from the
990 // block in hand. This avoids some lock traffic and redundant
991 // list activity.
992
993 // Acquire the ListLock to manipulate BlockList and FreeList.
994 // An Oyama-Taura-Yonezawa scheme might be more efficient.
995 Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
996 MonitorPopulation += _BLOCKSIZE-1;
997 MonitorFreeCount += _BLOCKSIZE-1;
998
999 // Add the new block to the list of extant blocks (gBlockList).
1000 // The very first objectMonitor in a block is reserved and dedicated.
1001 // It serves as blocklist "next" linkage.
1002 temp[0].FreeNext = gBlockList;
1003 gBlockList = temp;
1004
1005 // Add the new string of objectMonitors to the global free list
1006 temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
1007 gFreeList = temp + 1;
1008 Thread::muxRelease (&ListLock) ;
1009 TEVENT (Allocate block of monitors) ;
1010 }
1011 }
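// The shape of omAlloc() above, reduced to its three tiers: (1) pop from the
// thread-local free list, (2) bulk-refill that list from the global free list
// under the list lock, (3) carve a fresh block of monitors. All names here are
// hypothetical stand-ins for omFreeList/gFreeList, and the block-list
// bookkeeping (the reserved element [0]) is omitted for brevity.
#include <mutex>

struct ToyMon { ToyMon* next; };

struct ToyThread { ToyMon* local_free; int provision; };  // provision > 0 assumed

static ToyMon*    g_free_list = 0;
static std::mutex g_list_lock;

ToyMon* toy_alloc(ToyThread* self) {
  for (;;) {
    if (ToyMon* m = self->local_free) {            // tier 1: thread-local pop
      self->local_free = m->next;
      return m;
    }
    {
      std::lock_guard<std::mutex> g(g_list_lock);  // tier 2: bulk refill
      for (int i = 0; i < self->provision && g_free_list != 0; i++) {
        ToyMon* take = g_free_list;
        g_free_list  = take->next;
        take->next   = self->local_free;
        self->local_free = take;
      }
    }
    if (self->local_free != 0) continue;           // retry tier 1
    // tier 3: no free monitors anywhere -- allocate a fresh block and donate
    // it to the global list, then loop so tier 2 can reprovision us.
    static const int kBlock = 32;
    ToyMon* block = new ToyMon[kBlock];
    for (int i = 0; i < kBlock - 1; i++) block[i].next = &block[i + 1];
    std::lock_guard<std::mutex> g(g_list_lock);
    block[kBlock - 1].next = g_free_list;
    g_free_list = block;
  }
}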
1012
1013 // Place "m" on the caller's private per-thread omFreeList.
1014 // In practice there's no need to clamp or limit the number of
1015 // monitors on a thread's omFreeList as the only time we'll call
1016 // omRelease is to return a monitor to the free list after a CAS
1017 // attempt failed. This doesn't allow unbounded #s of monitors to
1018 // accumulate on a thread's free list.
1019 //
1020
1021 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
1022 guarantee (m->object() == NULL, "invariant") ;
1023
1024 // Remove from omInUseList
1025 if (MonitorInUseLists && fromPerThreadAlloc) {
1026 ObjectMonitor* curmidinuse = NULL;
1027 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
1028 if (m == mid) {
1029 // extract from per-thread in-use-list
1030 if (mid == Self->omInUseList) {
1031 Self->omInUseList = mid->FreeNext;
1032 } else if (curmidinuse != NULL) {
1033 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1034 }
1035 Self->omInUseCount --;
1036 // verifyInUse(Self);
1037 break;
1038 } else {
1039 curmidinuse = mid;
1040 mid = mid->FreeNext;
1041 }
1042 }
1043 }
1044
1045 // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new one
1046 m->FreeNext = Self->omFreeList ;
1047 Self->omFreeList = m ;
1048 Self->omFreeCount ++ ;
1049 }
1050
1051 // Return the monitors of a moribund thread's local free list to
1052 // the global free list. Typically a thread calls omFlush() when
1053 // it's dying. We could also consider having the VM thread steal
1054 // monitors from threads that have not run java code over a few
1055 // consecutive STW safepoints. Relatedly, we might decay
1056 // omFreeProvision at STW safepoints.
1057 //
1058 // Also return the monitors of a moribund thread's omInUseList to
1059 // a global gOmInUseList under the global list lock so these
1060 // will continue to be scanned.
1061 //
1062 // We currently call omFlush() from the Thread:: dtor _after the thread
1063 // has been excised from the thread list and is no longer a mutator.
1064 // That means that omFlush() can run concurrently with a safepoint and
1065 // the scavenge operator. Calling omFlush() from JavaThread::exit() might
1066 // be a better choice as we could safely reason that the JVM is
1067 // not at a safepoint at the time of the call, and thus there could
1068 // be no inopportune interleavings between omFlush() and the scavenge
1069 // operator.
1070
1071 void ObjectSynchronizer::omFlush (Thread * Self) {
1072 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
1073 Self->omFreeList = NULL ;
1074 ObjectMonitor * Tail = NULL ;
1075 int Tally = 0;
1076 if (List != NULL) {
1077 ObjectMonitor * s ;
1078 for (s = List ; s != NULL ; s = s->FreeNext) {
1079 Tally ++ ;
1080 Tail = s ;
1081 guarantee (s->object() == NULL, "invariant") ;
1082 guarantee (!s->is_busy(), "invariant") ;
1083 s->set_owner (NULL) ; // redundant but good hygiene
1084 TEVENT (omFlush - Move one) ;
1085 }
1086 guarantee (Tail != NULL && List != NULL, "invariant") ;
1087 }
1088
1089 ObjectMonitor * InUseList = Self->omInUseList;
1090 ObjectMonitor * InUseTail = NULL ;
1091 int InUseTally = 0;
1092 if (InUseList != NULL) {
1093 Self->omInUseList = NULL;
1094 ObjectMonitor *curom;
1095 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
1096 InUseTail = curom;
1097 InUseTally++;
1098 }
1099 // TODO debug
1100 assert(Self->omInUseCount == InUseTally, "inuse count off");
1101 Self->omInUseCount = 0;
1102 guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
1103 }
1104
1105 Thread::muxAcquire (&ListLock, "omFlush") ;
1106 if (Tail != NULL) {
1107 Tail->FreeNext = gFreeList ;
1108 gFreeList = List ;
1109 MonitorFreeCount += Tally;
1110 }
1111
1112 if (InUseTail != NULL) {
1113 InUseTail->FreeNext = gOmInUseList;
1114 gOmInUseList = InUseList;
1115 gOmInUseCount += InUseTally;
1116 }
1117
1118 Thread::muxRelease (&ListLock) ;
1119 TEVENT (omFlush) ;
1120 }
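// omFlush() above ends with two constant-time splices: the dying thread's
// private lists are prepended to the global lists by linking the private tail
// to the current global head, so no list walking happens under the lock. A
// reduced sketch with hypothetical names:
#include <mutex>

struct ToyNode { ToyNode* next; };

static ToyNode*   g_global_head = 0;
static std::mutex g_splice_lock;

// 'head'/'tail' describe a null-terminated private list owned by the caller.
void splice_to_global(ToyNode* head, ToyNode* tail) {
  if (head == 0) return;             // nothing to donate
  std::lock_guard<std::mutex> g(g_splice_lock);
  tail->next    = g_global_head;     // O(1): no traversal under the lock
  g_global_head = head;
}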
1121
1122 // Fast path code shared by multiple functions
1123 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1124 markOop mark = obj->mark();
1125 if (mark->has_monitor()) {
1126 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1127 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1128 return mark->monitor();
1129 }
1130 return ObjectSynchronizer::inflate(Thread::current(), obj);
1131 }
1132
1133
1134 // Note that we could encounter some performance loss through false-sharing as
1135 // multiple locks occupy the same $ line. Padding might be appropriate.
1136
1137
1007 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { 1138 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
1008 // Inflate mutates the heap ... 1139 // Inflate mutates the heap ...
1009 // Relaxing assertion for bug 6320749. 1140 // Relaxing assertion for bug 6320749.
1010 assert (Universe::verify_in_progress() || 1141 assert (Universe::verify_in_progress() ||
1011 !SafepointSynchronize::is_at_safepoint(), "invariant") ; 1142 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1065 ObjectMonitor * m = omAlloc (Self) ; 1196 ObjectMonitor * m = omAlloc (Self) ;
1066 // Optimistically prepare the objectmonitor - anticipate successful CAS 1197 // Optimistically prepare the objectmonitor - anticipate successful CAS
1067 // We do this before the CAS in order to minimize the length of time 1198 // We do this before the CAS in order to minimize the length of time
1068 // in which INFLATING appears in the mark. 1199 // in which INFLATING appears in the mark.
1069 m->Recycle(); 1200 m->Recycle();
1070 m->FreeNext = NULL ;
1071 m->_Responsible = NULL ; 1201 m->_Responsible = NULL ;
1072 m->OwnerIsThread = 0 ; 1202 m->OwnerIsThread = 0 ;
1073 m->_recursions = 0 ; 1203 m->_recursions = 0 ;
1074 m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class 1204 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class
1075 1205
1076 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; 1206 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1077 if (cmp != mark) { 1207 if (cmp != mark) {
1078 omRelease (Self, m) ; 1208 omRelease (Self, m, true) ;
1079 continue ; // Interference -- just retry 1209 continue ; // Interference -- just retry
1080 } 1210 }
1081 1211
1082 // We've successfully installed INFLATING (0) into the mark-word. 1212 // We've successfully installed INFLATING (0) into the mark-word.
1083 // This is the only case where 0 will appear in a mark-word. 1213
1129 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ; 1259 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1130 object->release_set_mark(markOopDesc::encode(m)); 1260 object->release_set_mark(markOopDesc::encode(m));
1131 1261
1132 // Hopefully the performance counters are allocated on distinct cache lines 1262 // Hopefully the performance counters are allocated on distinct cache lines
1133 // to avoid false sharing on MP systems ... 1263 // to avoid false sharing on MP systems ...
1134 if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 1264 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1135 TEVENT(Inflate: overwrite stacklock) ; 1265 TEVENT(Inflate: overwrite stacklock) ;
1136 if (TraceMonitorInflation) { 1266 if (TraceMonitorInflation) {
1137 if (object->is_instance()) { 1267 if (object->is_instance()) {
1138 ResourceMark rm; 1268 ResourceMark rm;
1139 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1269 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1161 m->set_header(mark); 1291 m->set_header(mark);
1162 m->set_owner(NULL); 1292 m->set_owner(NULL);
1163 m->set_object(object); 1293 m->set_object(object);
1164 m->OwnerIsThread = 1 ; 1294 m->OwnerIsThread = 1 ;
1165 m->_recursions = 0 ; 1295 m->_recursions = 0 ;
1166 m->FreeNext = NULL ;
1167 m->_Responsible = NULL ; 1296 m->_Responsible = NULL ;
1168 m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class 1297 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1169 1298
1170 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { 1299 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1171 m->set_object (NULL) ; 1300 m->set_object (NULL) ;
1172 m->set_owner (NULL) ; 1301 m->set_owner (NULL) ;
1173 m->OwnerIsThread = 0 ; 1302 m->OwnerIsThread = 0 ;
1174 m->Recycle() ; 1303 m->Recycle() ;
1175 omRelease (Self, m) ; 1304 omRelease (Self, m, true) ;
1176 m = NULL ; 1305 m = NULL ;
1177 continue ; 1306 continue ;
1178 // interference - the markword changed - just retry. 1307 // interference - the markword changed - just retry.
1179 // The state-transitions are one-way, so there's no chance of 1308 // The state-transitions are one-way, so there's no chance of
1180 // live-lock -- "Inflated" is an absorbing state. 1309 // live-lock -- "Inflated" is an absorbing state.
1181 } 1310 }
1182 1311
1183 // Hopefully the performance counters are allocated on distinct 1312 // Hopefully the performance counters are allocated on distinct
1184 // cache lines to avoid false sharing on MP systems ... 1313 // cache lines to avoid false sharing on MP systems ...
1185 if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 1314 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1186 TEVENT(Inflate: overwrite neutral) ; 1315 TEVENT(Inflate: overwrite neutral) ;
1187 if (TraceMonitorInflation) { 1316 if (TraceMonitorInflation) {
1188 if (object->is_instance()) { 1317 if (object->is_instance()) {
1189 ResourceMark rm; 1318 ResourceMark rm;
1190 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1319 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1194 } 1323 }
1195 return m ; 1324 return m ;
1196 } 1325 }
1197 } 1326 }
1198 1327
1199 1328 // Note that we could encounter some performance loss through false-sharing as
1200 // This is the fast monitor enter. The interpreter and compiler use 1329 // multiple locks occupy the same $ line. Padding might be appropriate.
1201 // some assembly copies of this code. Make sure to update that code 1330
1202 // if the following function is changed. The implementation is 1331
1203 // extremely sensitive to race conditions. Be careful. 1332 // Deflate_idle_monitors() is called at all safepoints, immediately
1204 1333 // after all mutators are stopped, but before any objects have moved.
1205 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) { 1334 // It traverses the list of known monitors, deflating where possible.
1206 if (UseBiasedLocking) { 1335 // The scavenged monitors are returned to the monitor free list.
1207 if (!SafepointSynchronize::is_at_safepoint()) { 1336 //
1208 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); 1337 // Beware that we scavenge at *every* stop-the-world point.
1209 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { 1338 // Having a large number of monitors in-circulation negatively
1210 return; 1339 // impacts the performance of some applications (e.g., PointBase).
1340 // Broadly, we want to minimize the # of monitors in circulation.
1341 //
1342 // We have added a flag, MonitorInUseLists, which creates a list
1343 // of active monitors for each thread. deflate_idle_monitors()
1344 // only scans the per-thread inuse lists. omAlloc() puts all
1345 // assigned monitors on the per-thread list. deflate_idle_monitors()
1346 // returns the non-busy monitors to the global free list.
1347 // When a thread dies, omFlush() adds the list of active monitors for
1348 // that thread to a global gOmInUseList, acquiring the
1349 // global list lock. deflate_idle_monitors() acquires the global
1350 // list lock to scan that list and return non-busy monitors to the global free list.
1351 // An alternative could have used a single global inuse list. The
1352 // downside would have been the additional cost of acquiring the global list lock
1353 // for every omAlloc().
1354 //
1355 // Perversely, the heap size -- and thus the STW safepoint rate --
1356 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1357 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1358 // This is an unfortunate aspect of this design.
1359 //
1360
1361 enum ManifestConstants {
1362 ClearResponsibleAtSTW = 0,
1363 MaximumRecheckInterval = 1000
1364 } ;
1365
1366 // Deflate a single monitor if not in use
1367 // Return true if deflated, false if in use
1368 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1369 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1370 bool deflated;
1371 // Normal case ... The monitor is associated with obj.
1372 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1373 guarantee (mid == obj->mark()->monitor(), "invariant");
1374 guarantee (mid->header()->is_neutral(), "invariant");
1375
1376 if (mid->is_busy()) {
1377 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1378 deflated = false;
1379 } else {
1380 // Deflate the monitor if it is no longer being used
1381 // It's idle - scavenge and return to the global free list
1382 // plain old deflation ...
1383 TEVENT (deflate_idle_monitors - scavenge1) ;
1384 if (TraceMonitorInflation) {
1385 if (obj->is_instance()) {
1386 ResourceMark rm;
1387 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1388 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
1389 }
1390 }
1391
1392 // Restore the header back to obj
1393 obj->release_set_mark(mid->header());
1394 mid->clear();
1395
1396 assert (mid->object() == NULL, "invariant") ;
1397
1398 // Move the object to the working free list defined by FreeHead,FreeTail.
1399 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1400 if (*FreeTailp != NULL) {
1401 ObjectMonitor * prevtail = *FreeTailp;
1402 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1403 prevtail->FreeNext = mid;
1211 } 1404 }
1212 } else { 1405 *FreeTailp = mid;
1213 assert(!attempt_rebias, "can not rebias toward VM thread"); 1406 deflated = true;
1214 BiasedLocking::revoke_at_safepoint(obj); 1407 }
1215 } 1408 return deflated;
1216 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1409 }
1217 } 1410
1218 1411 // Caller acquires ListLock
1219 slow_enter (obj, lock, THREAD) ; 1412 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1220 } 1413 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1221 1414 ObjectMonitor* mid;
1222 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { 1415 ObjectMonitor* next;
1223 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); 1416 ObjectMonitor* curmidinuse = NULL;
1224 // if displaced header is null, the previous enter is recursive enter, no-op 1417 int deflatedcount = 0;
1225 markOop dhw = lock->displaced_header(); 1418
1226 markOop mark ; 1419 for (mid = *listheadp; mid != NULL; ) {
1227 if (dhw == NULL) { 1420 oop obj = (oop) mid->object();
1228 // Recursive stack-lock. 1421 bool deflated = false;
1229 // Diagnostics -- Could be: stack-locked, inflating, inflated. 1422 if (obj != NULL) {
1230 mark = object->mark() ; 1423 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
1231 assert (!mark->is_neutral(), "invariant") ;
1232 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
1233 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
1234 } 1424 }
1235 if (mark->has_monitor()) { 1425 if (deflated) {
1236 ObjectMonitor * m = mark->monitor() ; 1426 // extract from per-thread in-use-list
1237 assert(((oop)(m->object()))->mark() == mark, "invariant") ; 1427 if (mid == *listheadp) {
1238 assert(m->is_entered(THREAD), "invariant") ; 1428 *listheadp = mid->FreeNext;
1429 } else if (curmidinuse != NULL) {
1430 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1431 }
1432 next = mid->FreeNext;
1433 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
1434 mid = next;
1435 deflatedcount++;
1436 } else {
1437 curmidinuse = mid;
1438 mid = mid->FreeNext;
1439 }
1440 }
1441 return deflatedcount;
1442 }
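// walk_monitor_list() above uses the classic "trailing pointer" deletion
// pattern: remember the previous surviving node so a match can be unlinked
// whether it is the head or an interior node. A stand-alone sketch with a
// hypothetical predicate in place of deflate_monitor():
template <typename Node, typename Pred>
int remove_matching(Node** headp, Pred should_remove) {
  int removed = 0;
  Node* prev = 0;                      // trailing pointer (last survivor)
  for (Node* cur = *headp; cur != 0; ) {
    Node* next = cur->FreeNext;
    if (should_remove(cur)) {
      if (prev == 0) *headp = next;    // unlink the head
      else           prev->FreeNext = next;
      cur->FreeNext = 0;               // detach before handing it elsewhere
      removed++;
    } else {
      prev = cur;                      // only advance prev past survivors
    }
    cur = next;
  }
  return removed;
}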
1443
1444 void ObjectSynchronizer::deflate_idle_monitors() {
1445 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1446 int nInuse = 0 ; // currently associated with objects
1447 int nInCirculation = 0 ; // extant
1448 int nScavenged = 0 ; // reclaimed
1449 bool deflated = false;
1450
1451 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
1452 ObjectMonitor * FreeTail = NULL ;
1453
1454 TEVENT (deflate_idle_monitors) ;
1455 // Prevent omFlush from changing mids in Thread dtor's during deflation
1456 // And in case the vm thread is acquiring a lock during a safepoint
1457 // See e.g. 6320749
1458 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1459
1460 if (MonitorInUseLists) {
1461 int inUse = 0;
1462 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1463 nInCirculation+= cur->omInUseCount;
1464 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
1465 cur->omInUseCount-= deflatedcount;
1466 // verifyInUse(cur);
1467 nScavenged += deflatedcount;
1468 nInuse += cur->omInUseCount;
1239 } 1469 }
1240 return ; 1470
1241 } 1471 // For moribund threads, scan gOmInUseList
1242 1472 if (gOmInUseList) {
1243 mark = object->mark() ; 1473 nInCirculation += gOmInUseCount;
1244 1474 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
1245 // If the object is stack-locked by the current thread, try to 1475 gOmInUseCount-= deflatedcount;
1246 // swing the displaced header from the box back to the mark. 1476 nScavenged += deflatedcount;
1247 if (mark == (markOop) lock) { 1477 nInuse += gOmInUseCount;
1248 assert (dhw->is_neutral(), "invariant") ; 1478 }
1249 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { 1479
1250 TEVENT (fast_exit: release stacklock) ; 1480 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1251 return; 1481 // Iterate over all extant monitors - Scavenge all idle monitors.
1252 } 1482 assert(block->object() == CHAINMARKER, "must be a block header");
1253 } 1483 nInCirculation += _BLOCKSIZE ;
1254 1484 for (int i = 1 ; i < _BLOCKSIZE; i++) {
1255 ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; 1485 ObjectMonitor* mid = &block[i];
1256 } 1486 oop obj = (oop) mid->object();
1257 1487
1258 // This routine is used to handle interpreter/compiler slow case 1488 if (obj == NULL) {
1259 // We don't need to use the fast path here, because it must have 1489 // The monitor is not associated with an object.
1260 // failed in the interpreter/compiler code. 1490 // The monitor should either be a thread-specific private
1261 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { 1491 // free list or the global free list.
1262 markOop mark = obj->mark(); 1492 // obj == NULL IMPLIES mid->is_busy() == 0
1263 assert(!mark->has_bias_pattern(), "should not see bias pattern here"); 1493 guarantee (!mid->is_busy(), "invariant") ;
1264 1494 continue ;
1265 if (mark->is_neutral()) { 1495 }
1266 // Anticipate successful CAS -- the ST of the displaced mark must 1496 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1267 // be visible <= the ST performed by the CAS. 1497
1268 lock->set_displaced_header(mark); 1498 if (deflated) {
1269 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { 1499 mid->FreeNext = NULL ;
1270 TEVENT (slow_enter: release stacklock) ; 1500 nScavenged ++ ;
1271 return ; 1501 } else {
1272 } 1502 nInuse ++;
1273 // Fall through to inflate() ... 1503 }
1274 } else 1504 }
1275 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 1505 }
1276 assert(lock != mark->locker(), "must not re-lock the same lock"); 1506
1277 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); 1507 MonitorFreeCount += nScavenged;
1278 lock->set_displaced_header(NULL); 1508
1279 return; 1509 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1280 } 1510
1281 1511 if (ObjectMonitor::Knob_Verbose) {
1282 #if 0 1512 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1283 // The following optimization isn't particularly useful. 1513 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1284 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { 1514 MonitorPopulation, MonitorFreeCount) ;
1285 lock->set_displaced_header (NULL) ; 1515 ::fflush(stdout) ;
1286 return ; 1516 }
1287 } 1517
1288 #endif 1518 ForceMonitorScavenge = 0; // Reset
1289 1519
1290 // The object header will never be displaced to this lock, 1520 // Move the scavenged monitors back to the global free list.
1291 // so it does not matter what the value is, except that it 1521 if (FreeHead != NULL) {
1292 // must be non-zero to avoid looking like a re-entrant lock, 1522 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
1293 // and must not look locked either. 1523 assert (FreeTail->FreeNext == NULL, "invariant") ;
1294 lock->set_displaced_header(markOopDesc::unused_mark()); 1524 // constant-time list splice - prepend scavenged segment to gFreeList
1295 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); 1525 FreeTail->FreeNext = gFreeList ;
1296 } 1526 gFreeList = FreeHead ;
1297 1527 }
1298 // This routine is used to handle interpreter/compiler slow case 1528 Thread::muxRelease (&ListLock) ;
1299 // We don't need to use fast path here, because it must have 1529
1300 // failed in the interpreter/compiler code. Simply using the heavy 1530 if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
1301 // weight monitor should be OK, unless someone finds otherwise. 1531 if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
1302 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { 1532
1303 fast_exit (object, lock, THREAD) ; 1533 // TODO: Add objectMonitor leak detection.
1304 } 1534 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1305 1535 GVars.stwRandom = os::random() ;
1306 // NOTE: must use heavy weight monitor to handle jni monitor enter 1536 GVars.stwCycle ++ ;
1307 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter 1537 }
1308 // the current locking is from JNI instead of Java code 1538
1309 TEVENT (jni_enter) ; 1539 // Monitor cleanup on JavaThread::exit
1310 if (UseBiasedLocking) {
1311 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1312 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1313 }
1314 THREAD->set_current_pending_monitor_is_from_java(false);
1315 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
1316 THREAD->set_current_pending_monitor_is_from_java(true);
1317 }
1318
1319 // NOTE: must use heavy weight monitor to handle jni monitor enter
1320 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
1321 if (UseBiasedLocking) {
1322 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1323 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1324 }
1325
1326 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
1327 return monitor->try_enter(THREAD);
1328 }
1329
1330
1331 // NOTE: must use heavy weight monitor to handle jni monitor exit
1332 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
1333 TEVENT (jni_exit) ;
1334 if (UseBiasedLocking) {
1335 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1336 }
1337 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1338
1339 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
1340 // If this thread has locked the object, exit the monitor. Note: can't use
1341 // monitor->check(CHECK); must exit even if an exception is pending.
1342 if (monitor->check(THREAD)) {
1343 monitor->exit(THREAD);
1344 }
1345 }
1346
1347 // complete_exit()/reenter() are used to wait on a nested lock
1348 // i.e. to give up an outer lock completely and then re-enter
1349 // Used when holding nested locks - lock acquisition order: lock1 then lock2
1350 // 1) complete_exit lock1 - saving recursion count
1351 // 2) wait on lock2
1352 // 3) when notified on lock2, unlock lock2
1353 // 4) reenter lock1 with original recursion count
1354 // 5) lock lock2
1355 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
1356 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
1357 TEVENT (complete_exit) ;
1358 if (UseBiasedLocking) {
1359 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1360 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1361 }
1362
1363 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1364
1365 return monitor->complete_exit(THREAD);
1366 }
1367
1368 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
1369 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
1370 TEVENT (reenter) ;
1371 if (UseBiasedLocking) {
1372 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1373 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1374 }
1375
1376 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1377
1378 monitor->reenter(recursion, THREAD);
1379 }
1380
1381 // This exists only as a workaround of dtrace bug 6254741
1382 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
1383 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
1384 return 0;
1385 }
1386
1387 // NOTE: must use heavy weight monitor to handle wait()
1388 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
1389 if (UseBiasedLocking) {
1390 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1391 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1392 }
1393 if (millis < 0) {
1394 TEVENT (wait - throw IAX) ;
1395 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1396 }
1397 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1398 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
1399 monitor->wait(millis, true, THREAD);
1400
1401 /* This dummy call is in place to get around dtrace bug 6254741. Once
1402 that's fixed we can uncomment the following line and remove the call */
1403 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
1404 dtrace_waited_probe(monitor, obj, THREAD);
1405 }
1406
1407 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
1408 if (UseBiasedLocking) {
1409 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1410 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1411 }
1412 if (millis < 0) {
1413 TEVENT (wait - throw IAX) ;
1414 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1415 }
1416 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
1417 }
1418
1419 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
1420 if (UseBiasedLocking) {
1421 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1422 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1423 }
1424
1425 markOop mark = obj->mark();
1426 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1427 return;
1428 }
1429 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
1430 }
1431
1432 // NOTE: see comment of notify()
1433 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
1434 if (UseBiasedLocking) {
1435 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1436 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1437 }
1438
1439 markOop mark = obj->mark();
1440 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1441 return;
1442 }
1443 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
1444 }
1445
1446 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
1447 if (UseBiasedLocking) {
1448 // NOTE: many places throughout the JVM do not expect a safepoint
1449 // to be taken here, in particular most operations on perm gen
1450 // objects. However, we only ever bias Java instances and all of
1451 // the call sites of identity_hash that might revoke biases have
1452 // been checked to make sure they can handle a safepoint. The
1453 // added check of the bias pattern is to avoid useless calls to
1454 // thread-local storage.
1455 if (obj->mark()->has_bias_pattern()) {
1456 // Box and unbox the raw reference just in case we cause a STW safepoint.
1457 Handle hobj (Self, obj) ;
1458 // Relaxing assertion for bug 6320749.
1459 assert (Universe::verify_in_progress() ||
1460 !SafepointSynchronize::is_at_safepoint(),
1461 "biases should not be seen by VM thread here");
1462 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
1463 obj = hobj() ;
1464 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1465 }
1466 }
1467
1468 // hashCode() is a heap mutator ...
1469 // Relaxing assertion for bug 6320749.
1470 assert (Universe::verify_in_progress() ||
1471 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1472 assert (Universe::verify_in_progress() ||
1473 Self->is_Java_thread() , "invariant") ;
1474 assert (Universe::verify_in_progress() ||
1475 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
1476
1477 ObjectMonitor* monitor = NULL;
1478 markOop temp, test;
1479 intptr_t hash;
1480 markOop mark = ReadStableMark (obj);
1481
1482 // object should remain ineligible for biased locking
1483 assert (!mark->has_bias_pattern(), "invariant") ;
1484
1485 if (mark->is_neutral()) {
1486 hash = mark->hash(); // this is a normal header
1487 if (hash) { // if it has hash, just return it
1488 return hash;
1489 }
1490 hash = get_next_hash(Self, obj); // allocate a new hash code
1491 temp = mark->copy_set_hash(hash); // merge the hash code into header
1492 // use (machine word version) atomic operation to install the hash
1493 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
1494 if (test == mark) {
1495 return hash;
1496 }
1497 // If the atomic operation failed, we must inflate the header
1498 // into a heavyweight monitor. We could add more code here
1499 // for the fast path, but it is not worth the complexity.
1500 } else if (mark->has_monitor()) {
1501 monitor = mark->monitor();
1502 temp = monitor->header();
1503 assert (temp->is_neutral(), "invariant") ;
1504 hash = temp->hash();
1505 if (hash) {
1506 return hash;
1507 }
1508 // Skip to the following code to reduce code size
1509 } else if (Self->is_lock_owned((address)mark->locker())) {
1510 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
1511 assert (temp->is_neutral(), "invariant") ;
1512 hash = temp->hash(); // by current thread, check if the displaced
1513 if (hash) { // header contains hash code
1514 return hash;
1515 }
1516 // WARNING:
1517 // The displaced header is strictly immutable.
1518 // It can NOT be changed in ANY case. So we have
1519 // to inflate the header into a heavyweight monitor
1520 // even though the current thread owns the lock. The reason
1521 // is that the BasicLock (stack slot) will be asynchronously
1522 // read by other threads during the inflate() function.
1523 // Any change to the stack may not propagate to other threads
1524 // correctly.
1525 }
1526
1527 // Inflate the monitor to set hash code
1528 monitor = ObjectSynchronizer::inflate(Self, obj);
1529 // Load displaced header and check it has hash code
1530 mark = monitor->header();
1531 assert (mark->is_neutral(), "invariant") ;
1532 hash = mark->hash();
1533 if (hash == 0) {
1534 hash = get_next_hash(Self, obj);
1535 temp = mark->copy_set_hash(hash); // merge hash code into header
1536 assert (temp->is_neutral(), "invariant") ;
1537 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
1538 if (test != mark) {
1539 // The only update to the header in the monitor (outside GC)
1540 // is installing the hash code. If someone adds a new usage of
1541 // the displaced header, please update this code.
1542 hash = test->hash();
1543 assert (test->is_neutral(), "invariant") ;
1544 assert (hash != 0, "Trivial unexpected object/monitor header usage.");
1545 }
1546 }
1547 // We finally get the hash
1548 return hash;
1549 }
1550
1551 // Deprecated -- use FastHashCode() instead.
1552
1553 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1554 return FastHashCode (Thread::current(), obj()) ;
1555 }
1556
1557 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1558 Handle h_obj) {
1559 if (UseBiasedLocking) {
1560 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
1561 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1562 }
1563
1564 assert(thread == JavaThread::current(), "Can only be called on current thread");
1565 oop obj = h_obj();
1566
1567 markOop mark = ReadStableMark (obj) ;
1568
1569 // Uncontended case, header points to stack
1570 if (mark->has_locker()) {
1571 return thread->is_lock_owned((address)mark->locker());
1572 }
1573 // Contended case, header points to ObjectMonitor (tagged pointer)
1574 if (mark->has_monitor()) {
1575 ObjectMonitor* monitor = mark->monitor();
1576 return monitor->is_entered(thread) != 0 ;
1577 }
1578 // Unlocked case, header in place
1579 assert(mark->is_neutral(), "sanity check");
1580 return false;
1581 }
1582
1583 // Be aware that this method can revoke the bias of the lock object.
1584 // It queries the ownership of the lock handle specified by 'h_obj'.
1585 // If the current thread owns the lock, it returns owner_self. If no
1586 // thread owns the lock, it returns owner_none. Otherwise, it returns
1587 // owner_other.
1588 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1589 (JavaThread *self, Handle h_obj) {
1590 // The caller must beware this method can revoke bias, and
1591 // revocation can result in a safepoint.
1592 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
1593 assert (self->thread_state() != _thread_blocked , "invariant") ;
1594
1595 // Possible mark states: neutral, biased, stack-locked, inflated
1596
1597 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
1598 // CASE: biased
1599 BiasedLocking::revoke_and_rebias(h_obj, false, self);
1600 assert(!h_obj->mark()->has_bias_pattern(),
1601 "biases should be revoked by now");
1602 }
1603
1604 assert(self == JavaThread::current(), "Can only be called on current thread");
1605 oop obj = h_obj();
1606 markOop mark = ReadStableMark (obj) ;
1607
1608 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
1609 if (mark->has_locker()) {
1610 return self->is_lock_owned((address)mark->locker()) ?
1611 owner_self : owner_other;
1612 }
1613
1614 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
1615 // The Object:ObjectMonitor relationship is stable as long as we're
1616 // not at a safepoint.
1617 if (mark->has_monitor()) {
1618 void * owner = mark->monitor()->_owner ;
1619 if (owner == NULL) return owner_none ;
1620 return (owner == self ||
1621 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1622 }
1623
1624 // CASE: neutral
1625 assert(mark->is_neutral(), "sanity check");
1626 return owner_none ; // it's unlocked
1627 }
1628
1629 // FIXME: jvmti should call this
1630 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
1631 if (UseBiasedLocking) {
1632 if (SafepointSynchronize::is_at_safepoint()) {
1633 BiasedLocking::revoke_at_safepoint(h_obj);
1634 } else {
1635 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
1636 }
1637 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1638 }
1639
1640 oop obj = h_obj();
1641 address owner = NULL;
1642
1643 markOop mark = ReadStableMark (obj) ;
1644
1645 // Uncontended case, header points to stack
1646 if (mark->has_locker()) {
1647 owner = (address) mark->locker();
1648 }
1649
1650 // Contended case, header points to ObjectMonitor (tagged pointer)
1651 if (mark->has_monitor()) {
1652 ObjectMonitor* monitor = mark->monitor();
1653 assert(monitor != NULL, "monitor should be non-null");
1654 owner = (address) monitor->owner();
1655 }
1656
1657 if (owner != NULL) {
1658 return Threads::owning_thread_from_monitor_owner(owner, doLock);
1659 }
1660
1661 // Unlocked case, header in place
1662 // Cannot have assertion since this object may have been
1663 // locked by another thread when reaching here.
1664 // assert(mark->is_neutral(), "sanity check");
1665
1666 return NULL;
1667 }
1668 1540
1669 // Iterate through monitor cache and attempt to release thread's monitors 1541 // Iterate through monitor cache and attempt to release thread's monitors
1670 // Gives up on a particular monitor if an exception occurs, but continues 1542 // Gives up on a particular monitor if an exception occurs, but continues
1671 // the overall iteration, swallowing the exception. 1543 // the overall iteration, swallowing the exception.
1672 class ReleaseJavaMonitorsClosure: public MonitorClosure { 1544 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1704 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); 1576 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1705 ObjectSynchronizer::monitors_iterate(&rjmc); 1577 ObjectSynchronizer::monitors_iterate(&rjmc);
1706 Thread::muxRelease(&ListLock); 1578 Thread::muxRelease(&ListLock);
1707 THREAD->clear_pending_exception(); 1579 THREAD->clear_pending_exception();
1708 } 1580 }
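// release_monitors_owned_by_thread() above pairs a closure with
// monitors_iterate(): visit every in-use monitor and force-exit the ones the
// exiting thread still owns, swallowing any per-monitor exception. A minimal
// sketch of that visitor pattern, with hypothetical Toy* types standing in for
// ObjectMonitor and MonitorClosure:
struct ToyOwnedMonitor {
  void* owner;
  void  force_exit() { owner = 0; }   // stand-in for ObjectMonitor::exit()
};

struct ToyMonitorVisitor {
  virtual void do_monitor(ToyOwnedMonitor* m) = 0;
  virtual ~ToyMonitorVisitor() {}
};

struct ReleaseOwnedByThread : ToyMonitorVisitor {
  void* dying_thread;
  explicit ReleaseOwnedByThread(void* t) : dying_thread(t) {}
  virtual void do_monitor(ToyOwnedMonitor* m) {
    if (m->owner == dying_thread) m->force_exit();  // only touch our own monitors
  }
};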
1709
1710 // Visitors ...
1711
1712 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1713 ObjectMonitor* block = gBlockList;
1714 ObjectMonitor* mid;
1715 while (block) {
1716 assert(block->object() == CHAINMARKER, "must be a block header");
1717 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1718 mid = block + i;
1719 oop object = (oop) mid->object();
1720 if (object != NULL) {
1721 closure->do_monitor(mid);
1722 }
1723 }
1724 block = (ObjectMonitor*) block->FreeNext;
1725 }
1726 }
1727
1728 void ObjectSynchronizer::oops_do(OopClosure* f) {
1729 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1730 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1731 assert(block->object() == CHAINMARKER, "must be a block header");
1732 for (int i = 1; i < _BLOCKSIZE; i++) {
1733 ObjectMonitor* mid = &block[i];
1734 if (mid->object() != NULL) {
1735 f->do_oop((oop*)mid->object_addr());
1736 }
1737 }
1738 }
1739 }
1740
1741 // Deflate_idle_monitors() is called at all safepoints, immediately
1742 // after all mutators are stopped, but before any objects have moved.
1743 // It traverses the list of known monitors, deflating where possible.
1744 // The scavenged monitors are returned to the monitor free list.
1745 //
1746 // Beware that we scavenge at *every* stop-the-world point.
1747 // Having a large number of monitors in-circulation negatively
1748 // impacts the performance of some applications (e.g., PointBase).
1749 // Broadly, we want to minimize the # of monitors in circulation.
1750 // Alternately, we could partition the active monitors into sub-lists
1751 // of those that need scanning and those that do not.
1752 // Specifically, we would add a new sub-list of objectmonitors
1753 // that are in-circulation and potentially active. deflate_idle_monitors()
1754 // would scan only that list. Other monitors could reside on a quiescent
1755 // list. Such sequestered monitors wouldn't need to be scanned by
1756 // deflate_idle_monitors(). omAlloc() would first check the global free list,
1757 // then the quiescent list, and, failing those, would allocate a new block.
1758 // Deflate_idle_monitors() would scavenge and move monitors to the
1759 // quiescent list.
1760 //
1761 // Perversely, the heap size -- and thus the STW safepoint rate --
1762 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1763 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1764 // This is an unfortunate aspect of this design.
1765 //
1766 // Another refinement would be to refrain from calling deflate_idle_monitors()
1767 // except at stop-the-world points associated with garbage collections.
1768 //
1769 // An even better solution would be to deflate on-the-fly, aggressively,
1770 // at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
1771
1772 void ObjectSynchronizer::deflate_idle_monitors() {
1773 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1774 int nInuse = 0 ; // currently associated with objects
1775 int nInCirculation = 0 ; // extant
1776 int nScavenged = 0 ; // reclaimed
1777
1778 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
1779 ObjectMonitor * FreeTail = NULL ;
1780
1781 // Iterate over all extant monitors - Scavenge all idle monitors.
1782 TEVENT (deflate_idle_monitors) ;
1783 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1784 assert(block->object() == CHAINMARKER, "must be a block header");
1785 nInCirculation += _BLOCKSIZE ;
1786 for (int i = 1 ; i < _BLOCKSIZE; i++) {
1787 ObjectMonitor* mid = &block[i];
1788 oop obj = (oop) mid->object();
1789
1790 if (obj == NULL) {
1791 // The monitor is not associated with an object.
1792 // The monitor should either be a thread-specific private
1793 // free list or the global free list.
1794 // obj == NULL IMPLIES mid->is_busy() == 0
1795 guarantee (!mid->is_busy(), "invariant") ;
1796 continue ;
1797 }
1798
1799 // Normal case ... The monitor is associated with obj.
1800 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1801 guarantee (mid == obj->mark()->monitor(), "invariant");
1802 guarantee (mid->header()->is_neutral(), "invariant");
1803
1804 if (mid->is_busy()) {
1805 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1806 nInuse ++ ;
1807 } else {
1808 // Deflate the monitor if it is no longer being used
1809 // It's idle - scavenge and return to the global free list
1810 // plain old deflation ...
1811 TEVENT (deflate_idle_monitors - scavenge1) ;
1812 if (TraceMonitorInflation) {
1813 if (obj->is_instance()) {
1814 ResourceMark rm;
1815 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1816 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
1817 }
1818 }
1819
1820 // Restore the header back to obj
1821 obj->release_set_mark(mid->header());
1822 mid->clear();
1823
1824 assert (mid->object() == NULL, "invariant") ;
1825
1826 // Move the object to the working free list defined by FreeHead,FreeTail.
1827 mid->FreeNext = NULL ;
1828 if (FreeHead == NULL) FreeHead = mid ;
1829 if (FreeTail != NULL) FreeTail->FreeNext = mid ;
1830 FreeTail = mid ;
1831 nScavenged ++ ;
1832 }
1833 }
1834 }
1835
1836 // Move the scavenged monitors back to the global free list.
1837 // In theory we don't need the freelist lock as we're at a STW safepoint.
1838 // omAlloc() and omFree() can only be called while a thread is _not_ at a safepoint.
1839 // But it's remotely possible that omFlush() or release_monitors_owned_by_thread()
1840 // might be called while not at a global STW safepoint. In the interest of
1841 // safety we protect the following access with ListLock.
1842 // An even more conservative and prudent approach would be to guard
1843 // the main loop in deflate_idle_monitors() with ListLock.
1844 if (FreeHead != NULL) {
1845 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
1846 assert (FreeTail->FreeNext == NULL, "invariant") ;
1847 // constant-time list splice - prepend scavenged segment to gFreeList
1848 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1849 FreeTail->FreeNext = gFreeList ;
1850 gFreeList = FreeHead ;
1851 Thread::muxRelease (&ListLock) ;
1852 }
1853
1854 if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
1855 if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation);
1856
1857 // TODO: Add objectMonitor leak detection.
1858 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1859 GVars.stwRandom = os::random() ;
1860 GVars.stwCycle ++ ;
1861 }
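
// Illustrative sketch (not HotSpot code): a minimal, self-contained model of the
// alternative partitioning described in the comment block preceding
// deflate_idle_monitors() above -- keep potentially-active monitors on an
// "in use" list that safepoint scavenging scans, and park scavenged monitors on
// a quiescent list that it never scans. For brevity the sketch recycles directly
// from the quiescent list, whereas the proposal above would consult the global
// free list first. All names here (MiniMonitor, *_demo, the list heads) are
// hypothetical.
#include <cstdio>

struct MiniMonitor {
  void        * object;     // non-NULL while associated with a Java object
  MiniMonitor * FreeNext;   // link for whichever list the monitor is on
};

static MiniMonitor * gInUseList     = nullptr;  // scanned at every safepoint
static MiniMonitor * gQuiescentList = nullptr;  // never scanned; feeds allocation

static MiniMonitor * omAlloc_demo () {
  // Prefer recycling a quiescent monitor before minting a new one.
  if (gQuiescentList != nullptr) {
    MiniMonitor * m = gQuiescentList;
    gQuiescentList = m->FreeNext;
    m->FreeNext = gInUseList;
    gInUseList = m;
    return m;
  }
  MiniMonitor * m = new MiniMonitor { nullptr, gInUseList };
  gInUseList = m;
  return m;
}

static void deflate_idle_monitors_demo () {
  // Move idle monitors (object == NULL) to the quiescent list;
  // only the in-use list is ever walked.
  MiniMonitor ** prevp = &gInUseList;
  while (*prevp != nullptr) {
    MiniMonitor * m = *prevp;
    if (m->object == nullptr) {
      *prevp = m->FreeNext;           // unlink from the in-use list
      m->FreeNext = gQuiescentList;   // prepend to the quiescent list
      gQuiescentList = m;
    } else {
      prevp = &m->FreeNext;
    }
  }
}

int main () {
  MiniMonitor * a = omAlloc_demo();
  MiniMonitor * b = omAlloc_demo();
  a->object = a;                      // a stays "busy"; b is idle
  deflate_idle_monitors_demo();
  printf("quiescent head is b: %s\n", (gQuiescentList == b) ? "yes" : "no");  // yes
  return 0;
}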
1862
1863 // A macro is used below because there may already be a pending
1864 // exception which should not abort the execution of the routines
1865 // which use this (which is why we don't put this into check_slow and
1866 // call it with a CHECK argument).
1867
1868 #define CHECK_OWNER() \
1869 do { \
1870 if (THREAD != _owner) { \
1871 if (THREAD->is_lock_owned((address) _owner)) { \
1872 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
1873 _recursions = 0; \
1874 OwnerIsThread = 1 ; \
1875 } else { \
1876 TEVENT (Throw IMSX) ; \
1877 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1878 } \
1879 } \
1880 } while (false)
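
// Note on the conversion above: when a stack-locked object is inflated, _owner
// may initially hold the address of the owning thread's BasicLock rather than
// the Thread itself. is_lock_owned() recognizes that case, and the macro then
// rebinds _owner to the Thread and records the fact in OwnerIsThread.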
1881
1882 // TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator
1883 // interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
1884
1885 ObjectWaiter* ObjectMonitor::first_waiter() {
1886 return _WaitSet;
1887 }
1888
1889 ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
1890 return o->_next;
1891 }
1892
1893 Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
1894 return o->_thread;
1895 }
1896
1897 // Initialize the monitor; except for the semaphore, all other fields
1898 // are simple integers or pointers.
1899 ObjectMonitor::ObjectMonitor() {
1900 _header = NULL;
1901 _count = 0;
1902 _waiters = 0;
1903 _recursions = 0;
1904 _object = NULL;
1905 _owner = NULL;
1906 _WaitSet = NULL;
1907 _WaitSetLock = 0 ;
1908 _Responsible = NULL ;
1909 _succ = NULL ;
1910 _cxq = NULL ;
1911 FreeNext = NULL ;
1912 _EntryList = NULL ;
1913 _SpinFreq = 0 ;
1914 _SpinClock = 0 ;
1915 OwnerIsThread = 0 ;
1916 }
1917
1918 ObjectMonitor::~ObjectMonitor() {
1919 // TODO: Add asserts ...
1920 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
1921 // _count == 0 _EntryList == NULL etc
1922 }
1923
1924 intptr_t ObjectMonitor::is_busy() const {
1925 // TODO-FIXME: merge _count and _waiters.
1926 // TODO-FIXME: assert _owner == null implies _recursions = 0
1927 // TODO-FIXME: assert _WaitSet != null implies _count > 0
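// OR-ing the fields yields a non-zero value iff any of them is non-zero;
// callers only need that boolean-style summary, not an exact count.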
1928 return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
1929 }
1930
1931 void ObjectMonitor::Recycle () {
1932 // TODO: add stronger asserts ...
1933 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
1934 // _count == 0 EntryList == NULL
1935 // _recursions == 0 _WaitSet == NULL
1936 // TODO: assert (is_busy()|_recursions) == 0
1937 _succ = NULL ;
1938 _EntryList = NULL ;
1939 _cxq = NULL ;
1940 _WaitSet = NULL ;
1941 _recursions = 0 ;
1942 _SpinFreq = 0 ;
1943 _SpinClock = 0 ;
1944 OwnerIsThread = 0 ;
1945 }
1946
1947 // WaitSet management ...
1948
1949 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
1950 assert(node != NULL, "should not dequeue NULL node");
1951 assert(node->_prev == NULL, "node already in list");
1952 assert(node->_next == NULL, "node already in list");
1953 // put node at end of queue (circular doubly linked list)
1954 if (_WaitSet == NULL) {
1955 _WaitSet = node;
1956 node->_prev = node;
1957 node->_next = node;
1958 } else {
1959 ObjectWaiter* head = _WaitSet ;
1960 ObjectWaiter* tail = head->_prev;
1961 assert(tail->_next == head, "invariant check");
1962 tail->_next = node;
1963 head->_prev = node;
1964 node->_next = head;
1965 node->_prev = tail;
1966 }
1967 }
1968
1969 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
1970 // dequeue the very first waiter
1971 ObjectWaiter* waiter = _WaitSet;
1972 if (waiter) {
1973 DequeueSpecificWaiter(waiter);
1974 }
1975 return waiter;
1976 }
1977
1978 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
1979 assert(node != NULL, "should not dequeue NULL node");
1980 assert(node->_prev != NULL, "node already removed from list");
1981 assert(node->_next != NULL, "node already removed from list");
1982 // when the waiter has woken up because of interrupt,
1983 // timeout or other spurious wake-up, dequeue the
1984 // waiter from the waiting list.
1985 ObjectWaiter* next = node->_next;
1986 if (next == node) {
1987 assert(node->_prev == node, "invariant check");
1988 _WaitSet = NULL;
1989 } else {
1990 ObjectWaiter* prev = node->_prev;
1991 assert(prev->_next == node, "invariant check");
1992 assert(next->_prev == node, "invariant check");
1993 next->_prev = prev;
1994 prev->_next = next;
1995 if (_WaitSet == node) {
1996 _WaitSet = next;
1997 }
1998 }
1999 node->_next = NULL;
2000 node->_prev = NULL;
2001 }
2002
2003 static char * kvGet (char * kvList, const char * Key) {
2004 if (kvList == NULL) return NULL ;
2005 size_t n = strlen (Key) ;
2006 char * Search ;
2007 for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
2008 if (strncmp (Search, Key, n) == 0) {
2009 if (Search[n] == '=') return Search + n + 1 ;
2010 if (Search[n] == 0) return (char *) "1" ;
2011 }
2012 }
2013 return NULL ;
2014 }
2015
2016 static int kvGetInt (char * kvList, const char * Key, int Default) {
2017 char * v = kvGet (kvList, Key) ;
2018 int rslt = v ? ::strtol (v, NULL, 0) : Default ;
2019 if (Knob_ReportSettings && v != NULL) {
2020 ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
2021 ::fflush (stdout) ;
2022 }
2023 return rslt ;
2024 }
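
// Illustrative sketch (not HotSpot code): a stand-alone model of the kvList
// lookup semantics above. It assumes the NUL-separated "Key" / "Key=Value"
// encoding that DeferredInitialize() below produces by rewriting ':' to NUL and
// double-NUL terminating the buffer. Names ending in _demo are hypothetical.
#include <cstdio>
#include <cstdlib>
#include <cstring>

static const char * kvGet_demo (const char * kvList, const char * Key) {
  size_t n = strlen(Key);
  for (const char * s = kvList; *s; s += strlen(s) + 1) {
    if (strncmp(s, Key, n) == 0) {
      if (s[n] == '=') return s + n + 1;   // "Key=Value" entry
      if (s[n] == 0)   return "1";         // bare "Key" defaults to "1"
    }
  }
  return NULL;                             // absent -> caller supplies a default
}

int main () {
  // Equivalent of SyncKnobs="SpinLimit=4096:Verbose" after ':' -> NUL rewriting:
  const char kvList[] = "SpinLimit=4096\0Verbose\0";
  printf("SpinLimit = %ld\n", strtol(kvGet_demo(kvList, "SpinLimit"), NULL, 0)); // 4096
  printf("Verbose   = %s\n",  kvGet_demo(kvList, "Verbose"));                    // 1
  printf("QMode     = %s\n",  kvGet_demo(kvList, "QMode") ? "set" : "default");  // default
  return 0;
}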
2025
2026 // By convention we unlink a contending thread from EntryList|cxq immediately
2027 // after the thread acquires the lock in ::enter(). Equally, we could defer
2028 // unlinking the thread until ::exit()-time.
2029
2030 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
2031 {
2032 assert (_owner == Self, "invariant") ;
2033 assert (SelfNode->_thread == Self, "invariant") ;
2034
2035 if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
2036 // Normal case: remove Self from the DLL EntryList .
2037 // This is a constant-time operation.
2038 ObjectWaiter * nxt = SelfNode->_next ;
2039 ObjectWaiter * prv = SelfNode->_prev ;
2040 if (nxt != NULL) nxt->_prev = prv ;
2041 if (prv != NULL) prv->_next = nxt ;
2042 if (SelfNode == _EntryList ) _EntryList = nxt ;
2043 assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2044 assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2045 TEVENT (Unlink from EntryList) ;
2046 } else {
2047 guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2048 // Inopportune interleaving -- Self is still on the cxq.
2049 // This usually means the enqueue of self raced an exiting thread.
2050 // Normally we'll find Self near the front of the cxq, so
2051 // dequeueing is typically fast. If need be we can accelerate
2052 // this with some MCS/CLH-like bidirectional list hints and advisory
2053 // back-links so dequeueing from the interior will normally operate
2054 // in constant-time.
2055 // Dequeue Self from either the head (with CAS) or from the interior
2056 // with a linear-time scan and normal non-atomic memory operations.
2057 // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
2058 // and then unlink Self from EntryList. We have to drain eventually,
2059 // so it might as well be now.
2060
2061 ObjectWaiter * v = _cxq ;
2062 assert (v != NULL, "invariant") ;
2063 if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
2064 // The CAS above can fail from interference IFF a "RAT" arrived.
2065 // In that case Self must be in the interior and can no longer be
2066 // at the head of cxq.
2067 if (v == SelfNode) {
2068 assert (_cxq != v, "invariant") ;
2069 v = _cxq ; // CAS above failed - start scan at head of list
2070 }
2071 ObjectWaiter * p ;
2072 ObjectWaiter * q = NULL ;
2073 for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
2074 q = p ;
2075 assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2076 }
2077 assert (v != SelfNode, "invariant") ;
2078 assert (p == SelfNode, "Node not found on cxq") ;
2079 assert (p != _cxq, "invariant") ;
2080 assert (q != NULL, "invariant") ;
2081 assert (q->_next == p, "invariant") ;
2082 q->_next = p->_next ;
2083 }
2084 TEVENT (Unlink from cxq) ;
2085 }
2086
2087 // Diagnostic hygiene ...
2088 SelfNode->_prev = (ObjectWaiter *) 0xBAD ;
2089 SelfNode->_next = (ObjectWaiter *) 0xBAD ;
2090 SelfNode->TState = ObjectWaiter::TS_RUN ;
2091 }
2092
2093 // Caveat: TryLock() is not necessarily serializing if it returns failure.
2094 // Callers must compensate as needed.
2095
2096 int ObjectMonitor::TryLock (Thread * Self) {
2097 for (;;) {
2098 void * own = _owner ;
2099 if (own != NULL) return 0 ;
2100 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
2101 // Either guarantee _recursions == 0 or set _recursions = 0.
2102 assert (_recursions == 0, "invariant") ;
2103 assert (_owner == Self, "invariant") ;
2104 // CONSIDER: set or assert that OwnerIsThread == 1
2105 return 1 ;
2106 }
2107 // The lock had been free momentarily, but we lost the race to the lock.
2108 // Interference -- the CAS failed.
2109 // We can either return -1 or retry.
2110 // Retry doesn't make as much sense because the lock was just acquired.
2111 if (true) return -1 ;
2112 }
2113 }
2114
2115 // NotRunnable() -- informed spinning
2116 //
2117 // Don't bother spinning if the owner is not eligible to drop the lock.
2118 // Peek at the owner's schedctl.sc_state and Thread._thread_state and
2119 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2120 // The thread must be runnable in order to drop the lock in a timely fashion.
2121 // If the _owner is not runnable then spinning will not likely be
2122 // successful (profitable).
2123 //
2124 // Beware -- the thread referenced by _owner could have died
2125 // so a simple fetch from _owner->_thread_state might trap.
2126 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2127 // Because of the lifecycle issues the schedctl and _thread_state values
2128 // observed by NotRunnable() might be garbage. NotRunnable must
2129 // tolerate this and consider the observed _thread_state value
2130 // as advisory.
2131 //
2132 // Beware too, that _owner is sometimes a BasicLock address and sometimes
2133 // a thread pointer. We differentiate the two cases with OwnerIsThread.
2134 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
2135 // with the LSB of _owner. Another option would be to probabilistically probe
2136 // the putative _owner->TypeTag value.
2137 //
2138 // Checking _thread_state isn't perfect. Even if the thread is
2139 // in_java it might be blocked on a page-fault or have been preempted
2140 // and sitting on a ready/dispatch queue. _thread_state in conjunction
2141 // with schedctl.sc_state gives us a good picture of what the
2142 // thread is doing, however.
2143 //
2144 // TODO: check schedctl.sc_state.
2145 // We'll need to use SafeFetch32() to read from the schedctl block.
2146 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2147 //
2148 // The return value from NotRunnable() is *advisory* -- the
2149 // result is based on sampling and is not necessarily coherent.
2150 // The caller must tolerate false-negative and false-positive errors.
2151 // Spinning, in general, is probabilistic anyway.
2152
2153
2154 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
2155 // Check either OwnerIsThread or ox->TypeTag == 2BAD.
2156 if (!OwnerIsThread) return 0 ;
2157
2158 if (ox == NULL) return 0 ;
2159
2160 // Avoid transitive spinning ...
2161 // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
2162 // Immediately after T1 acquires L it's possible that T2, also
2163 // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2164 // This occurs transiently after T1 acquired L but before
2165 // T1 managed to clear T1.Stalled. T2 does not need to abort
2166 // its spin in this circumstance.
2167 intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
2168
2169 if (BlockedOn == 1) return 1 ;
2170 if (BlockedOn != 0) {
2171 return BlockedOn != intptr_t(this) && _owner == ox ;
2172 }
2173
2174 assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
2175 int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
2176 // consider also: jst != _thread_in_Java -- but that's overspecific.
2177 return jst == _thread_blocked || jst == _thread_in_native ;
2178 }
2179
2180
2181 // Adaptive spin-then-block - rational spinning
2182 //
2183 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
2184 // algorithm. On high order SMP systems it would be better to start with
2185 // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH,
2186 // a contending thread could enqueue itself on the cxq and then spin locally
2187 // on a thread-specific variable such as its ParkEvent._Event flag.
2188 // That's left as an exercise for the reader. Note that global spinning is
2189 // not problematic on Niagara, as the L2$ serves the interconnect and has both
2190 // low latency and massive bandwidth.
2191 //
2192 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
2193 // acquisition attempts where we opt to spin -- at 100% and vary the spin count
2194 // (duration) or we can fix the count at approximately the duration of
2195 // a context switch and vary the frequency. Of course we could also
2196 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
2197 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
2198 //
2199 // This implementation varies the duration "D", where D varies with
2200 // the success rate of recent spin attempts. (D is capped at approximately
2201 // the length of a round-trip context switch). The success rate for recent
2202 // spin attempts is a good predictor of the success rate of future spin
2203 // attempts. The mechanism adapts automatically to varying critical
2204 // section length (lock modality), system load and degree of parallelism.
2205 // D is maintained per-monitor in _SpinDuration and is initialized
2206 // optimistically. Spin frequency is fixed at 100%.
2207 //
2208 // Note that _SpinDuration is volatile, but we update it without locks
2209 // or atomics. The code is designed so that _SpinDuration stays within
2210 // a reasonable range even in the presence of races. The arithmetic
2211 // operations on _SpinDuration are closed over the domain of legal values,
2212 // so at worst a race will install an older but still legal value.
2213 // At the very worst this introduces some apparent non-determinism.
2214 // We might spin when we shouldn't or vice-versa, but since the spin
2215 // counts are relatively short, even in the worst case, the effect is harmless.
2216 //
2217 // Care must be taken that a low "D" value does not become
2218 // an absorbing state. Transient spinning failures -- when spinning
2219 // is overall profitable -- should not cause the system to converge
2220 // on low "D" values. We want spinning to be stable and predictable
2221 // and fairly responsive to change and at the same time we don't want
2222 // it to oscillate, become metastable, be "too" non-deterministic,
2223 // or converge on or enter undesirable stable absorbing states.
2224 //
2225 // We implement a feedback-based control system -- using past behavior
2226 // to predict future behavior. We face two issues: (a) if the
2227 // input signal is random then the spin predictor won't provide optimal
2228 // results, and (b) if the signal frequency is too high then the control
2229 // system, which has some natural response lag, will "chase" the signal.
2230 // (b) can arise from multimodal lock hold times. Transient preemption
2231 // can also result in apparent bimodal lock hold times.
2232 // Although sub-optimal, neither condition is particularly harmful, as
2233 // in the worst-case we'll spin when we shouldn't or vice-versa.
2234 // The maximum spin duration is rather short so the failure modes aren't bad.
2235 // To be conservative, I've tuned the gain in the system to bias toward
2236 // _not spinning. Relatedly, the system can sometimes enter a mode where it
2237 // "rings" or oscillates between spinning and not spinning. This happens
2238 // when spinning is just on the cusp of profitability, however, so the
2239 // situation is not dire. The state is benign -- there's no need to add
2240 // hysteresis control to damp the transition rate between spinning and
2241 // not spinning.
2242 //
2243 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2244 //
2245 // Spin-then-block strategies ...
2246 //
2247 // Thoughts on ways to improve spinning :
2248 //
2249 // * Periodically call {psr_}getloadavg() while spinning, and
2250 // permit unbounded spinning if the load average is <
2251 // the number of processors. Beware, however, that getloadavg()
2252 // is exceptionally fast on solaris (about 1/10 the cost of a full
2253 // spin cycle), but quite expensive on linux. Beware also, that
2254 // multiple JVMs could "ring" or oscillate in a feedback loop.
2255 // Sufficient damping would solve that problem.
2256 //
2257 // * We currently use spin loops with iteration counters to approximate
2258 // spinning for some interval. Given the availability of high-precision
2259 // time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
2260 // someday reimplement the spin loops to be duration-based instead of iteration-based.
2261 //
2262 // * Don't spin if there are more than N = (CPUs/2) threads
2263 // currently spinning on the monitor (or globally).
2264 // That is, limit the number of concurrent spinners.
2265 // We might also limit the # of spinners in the JVM, globally.
2266 //
2267 // * If a spinning thread observes _owner change hands it should
2268 // abort the spin (and park immediately) or at least debit
2269 // the spin counter by a large "penalty".
2270 //
2271 // * Classically, the spin count is either K*(CPUs-1) or is a
2272 // simple constant that approximates the length of a context switch.
2273 // We currently use a value -- computed by a special utility -- that
2274 // approximates round-trip context switch times.
2275 //
2276 // * Normally schedctl_start()/_stop() is used to advise the kernel
2277 // to avoid preempting threads that are running in short, bounded
2278 // critical sections. We could use the schedctl hooks in an inverted
2279 // sense -- spinners would set the nopreempt flag, but poll the preempt
2280 // pending flag. If a spinner observed a pending preemption it'd immediately
2281 // abort the spin and park. As such, the schedctl service acts as
2282 // a preemption warning mechanism.
2283 //
2284 // * In lieu of spinning, if the system is running below saturation
2285 // (that is, loadavg() << #cpus), we can instead suppress futile
2286 // wakeup throttling, or even wake more than one successor at exit-time.
2287 // The net effect is largely equivalent to spinning. In both cases,
2288 // contending threads go ONPROC and opportunistically attempt to acquire
2289 // the lock, decreasing lock handover latency at the expense of wasted
2290 // cycles and context switching.
2291 //
2292 // * We might spin less after we've parked as the thread will
2293 // have less $ and TLB affinity with the processor.
2294 // Likewise, we might spin less if we come ONPROC on a different
2295 // processor or after a long period (>> rechose_interval).
2296 //
2297 // * A table-driven state machine similar to Solaris' dispadmin scheduling
2298 // tables might be a better design. Instead of encoding information in
2299 // _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
2300 // discrete states. Success or failure during a spin would drive
2301 // state transitions, and each state node would contain a spin count.
2302 //
2303 // * If the processor is operating in a mode intended to conserve power
2304 // (such as Intel's SpeedStep) or to reduce thermal output (thermal
2305 // step-down mode) then the Java synchronization subsystem should
2306 // forgo spinning.
2307 //
2308 // * The minimum spin duration should be approximately the worst-case
2309 // store propagation latency on the platform. That is, the time
2310 // it takes a store on CPU A to become visible on CPU B, where A and
2311 // B are "distant".
2312 //
2313 // * We might want to factor a thread's priority in the spin policy.
2314 // Threads with a higher priority might spin for slightly longer.
2315 // Similarly, if we use back-off in the TATAS loop, lower priority
2316 // threads might back-off longer. We don't currently use a
2317 // thread's priority when placing it on the entry queue. We may
2318 // want to consider doing so in future releases.
2319 //
2320 // * We might transiently drop a thread's scheduling priority while it spins.
2321 // SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
2322 // would suffice. We could even consider letting the thread spin indefinitely at
2323 // a depressed or "idle" priority. This brings up fairness issues, however --
2324 // in a saturated system a thread with a reduced priority could languish
2325 // for extended periods on the ready queue.
2326 //
2327 // * While spinning try to use the otherwise wasted time to help the VM make
2328 // progress:
2329 //
2330 // -- YieldTo() the owner, if the owner is OFFPROC but ready
2331 // Donate our remaining quantum directly to the ready thread.
2332 // This helps "push" the lock owner through the critical section.
2333 // It also tends to improve affinity/locality as the lock
2334 // "migrates" less frequently between CPUs.
2335 // -- Walk our own stack in anticipation of blocking. Memoize the roots.
2336 // -- Perform strand checking for other threads. Unpark potential strandees.
2337 // -- Help GC: trace or mark -- this would need to be a bounded unit of work.
2338 // Unfortunately this will pollute our $ and TLBs. Recall that we
2339 // spin to avoid context switching -- context switching has an
2340 // immediate cost in latency, a disruptive cost to other strands on a CMT
2341 // processor, and an amortized cost because of the D$ and TLB cache
2342 // reload transient when the thread comes back ONPROC and repopulates
2343 // $s and TLBs.
2344 // -- call getloadavg() to see if the system is saturated. It'd probably
2345 // make sense to call getloadavg() half way through the spin.
2346 // If the system isn't at full capacity then we'd simply reset
2347 // the spin counter and extend the spin attempt.
2348 // -- Doug points out that we should use the same "helping" policy
2349 // in thread.yield().
2350 //
2351 // * Try MONITOR-MWAIT on systems that support those instructions.
2352 //
2353 // * The spin statistics that drive spin decisions & frequency are
2354 // maintained in the objectmonitor structure so if we deflate and reinflate
2355 // we lose spin state. In practice this is not usually a concern
2356 // as the default spin state after inflation is aggressive (optimistic)
2357 // and tends toward spinning. So in the worst case for a lock where
2358 // spinning is not profitable we may spin unnecessarily for a brief
2359 // period. But then again, if a lock is contended it'll tend not to deflate
2360 // in the first place.
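
// Illustrative sketch (not HotSpot code): a minimal model of the duration
// feedback described above. The knob values are hypothetical placeholders; the
// update rules mirror the Knob_Bonus / Knob_Penalty / Knob_Poverty /
// Knob_SpinLimit arithmetic used by TrySpin_VaryDuration() below.
#include <cstdio>

static const int Knob_SpinLimit_demo = 5000;   // upper bound on the duration
static const int Knob_Poverty_demo   = 1000;   // floor applied before a bonus
static const int Knob_Bonus_demo     = 100;    // reward for a profitable spin
static const int Knob_Penalty_demo   = 200;    // charge for a futile spin

static int SpinDuration_demo = 3000;           // per-monitor state, optimistic start

static void OnSpinResult_demo (bool acquired) {
  int x = SpinDuration_demo;
  if (acquired) {
    // Successful spin: raise the duration (not clamped precisely at the limit).
    if (x < Knob_SpinLimit_demo) {
      if (x < Knob_Poverty_demo) x = Knob_Poverty_demo;
      SpinDuration_demo = x + Knob_Bonus_demo;
    }
  } else {
    // Futile spin: back the duration off toward, but never below, zero.
    if (x > 0) {
      x -= Knob_Penalty_demo;
      if (x < 0) x = 0;
      SpinDuration_demo = x;
    }
  }
}

int main () {
  for (int i = 0; i < 5; i++) OnSpinResult_demo(false);
  printf("after 5 futile spins    : %d\n", SpinDuration_demo);   // 2000
  for (int i = 0; i < 3; i++) OnSpinResult_demo(true);
  printf("after 3 profitable spins: %d\n", SpinDuration_demo);   // 2300
  return 0;
}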
2361
2362
2363 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
2364 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
2365
2366 // Spinning: Fixed frequency (100%), vary duration
2367
2368 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
2369
2370 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
2371 int ctr = Knob_FixedSpin ;
2372 if (ctr != 0) {
2373 while (--ctr >= 0) {
2374 if (TryLock (Self) > 0) return 1 ;
2375 SpinPause () ;
2376 }
2377 return 0 ;
2378 }
2379
2380 for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
2381 if (TryLock(Self) > 0) {
2382 // Increase _SpinDuration ...
2383 // Note that we don't clamp SpinDuration precisely at SpinLimit.
2384 // Raising _SpinDuration to the poverty line is key.
2385 int x = _SpinDuration ;
2386 if (x < Knob_SpinLimit) {
2387 if (x < Knob_Poverty) x = Knob_Poverty ;
2388 _SpinDuration = x + Knob_BonusB ;
2389 }
2390 return 1 ;
2391 }
2392 SpinPause () ;
2393 }
2394
2395 // Admission control - verify preconditions for spinning
2396 //
2397 // We always spin a little bit, just to prevent _SpinDuration == 0 from
2398 // becoming an absorbing state. Put another way, we spin briefly to
2399 // sample, just in case the system load, parallelism, contention, or lock
2400 // modality changed.
2401 //
2402 // Consider the following alternative:
2403 // Periodically set _SpinDuration = _SpinLimit and try a long/full
2404 // spin attempt. "Periodically" might mean after a tally of
2405 // the # of failed spin attempts (or iterations) reaches some threshold.
2406 // This takes us into the realm of 1-out-of-N spinning, where we
2407 // hold the duration constant but vary the frequency.
2408
2409 ctr = _SpinDuration ;
2410 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
2411 if (ctr <= 0) return 0 ;
2412
2413 if (Knob_SuccRestrict && _succ != NULL) return 0 ;
2414 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
2415 TEVENT (Spin abort - notrunnable [TOP]);
2416 return 0 ;
2417 }
2418
2419 int MaxSpin = Knob_MaxSpinners ;
2420 if (MaxSpin >= 0) {
2421 if (_Spinner > MaxSpin) {
2422 TEVENT (Spin abort -- too many spinners) ;
2423 return 0 ;
2424 }
2425 // Slightly racy, but benign ...
2426 Adjust (&_Spinner, 1) ;
2427 }
2428
2429 // We're good to spin ... spin ingress.
2430 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
2431 // when preparing to LD...CAS _owner, etc and the CAS is likely
2432 // to succeed.
2433 int hits = 0 ;
2434 int msk = 0 ;
2435 int caspty = Knob_CASPenalty ;
2436 int oxpty = Knob_OXPenalty ;
2437 int sss = Knob_SpinSetSucc ;
2438 if (sss && _succ == NULL ) _succ = Self ;
2439 Thread * prv = NULL ;
2440
2441 // There are three ways to exit the following loop:
2442 // 1. A successful spin where this thread has acquired the lock.
2443 // 2. Spin failure with prejudice
2444 // 3. Spin failure without prejudice
2445
2446 while (--ctr >= 0) {
2447
2448 // Periodic polling -- Check for pending GC
2449 // Threads may spin while they're unsafe.
2450 // We don't want spinning threads to delay the JVM from reaching
2451 // a stop-the-world safepoint or to steal cycles from GC.
2452 // If we detect a pending safepoint we abort in order that
2453 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
2454 // this thread, if safe, doesn't steal cycles from GC.
2455 // This is in keeping with the "no loitering in runtime" rule.
2456 // We periodically check to see if there's a safepoint pending.
2457 if ((ctr & 0xFF) == 0) {
2458 if (SafepointSynchronize::do_call_back()) {
2459 TEVENT (Spin: safepoint) ;
2460 goto Abort ; // abrupt spin egress
2461 }
2462 if (Knob_UsePause & 1) SpinPause () ;
2463
2464 int (*scb)(intptr_t,int) = SpinCallbackFunction ;
2465 if (hits > 50 && scb != NULL) {
2466 int abend = (*scb)(SpinCallbackArgument, 0) ;
2467 }
2468 }
2469
2470 if (Knob_UsePause & 2) SpinPause() ;
2471
2472 // Exponential back-off ... Stay off the bus to reduce coherency traffic.
2473 // This is useful on classic SMP systems, but is of less utility on
2474 // N1-style CMT platforms.
2475 //
2476 // Trade-off: lock acquisition latency vs coherency bandwidth.
2477 // Lock hold times are typically short. A histogram
2478 // of successful spin attempts shows that we usually acquire
2479 // the lock early in the spin. That suggests we want to
2480 // sample _owner frequently in the early phase of the spin,
2481 // but then back-off and sample less frequently as the spin
2482 // progresses. The back-off makes us a good citizen on big
2483 // SMP systems. Oversampling _owner can consume excessive
2484 // coherency bandwidth. Relatedly, if we oversample _owner we
2485 // can inadvertently interfere with the ST m->owner=null
2486 // executed by the lock owner.
2487 if (ctr & msk) continue ;
2488 ++hits ;
2489 if ((hits & 0xF) == 0) {
2490 // The 0xF, above, corresponds to the exponent.
2491 // Consider: (msk+1)|msk
2492 msk = ((msk << 2)|3) & BackOffMask ;
2493 }
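// Illustration of the schedule above: with a sufficiently large BackOffMask,
// msk steps through 0, 0x3, 0xF, 0x3F, ... (updated every 16th "hit"), so
// _owner is sampled on every iteration at first and then only once every
// (msk+1) iterations as the spin progresses.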
2494
2495 // Probe _owner with TATAS
2496 // If this thread observes the monitor transition or flicker
2497 // from locked to unlocked to locked, then the odds that this
2498 // thread will acquire the lock in this spin attempt go down
2499 // considerably. The same argument applies if the CAS fails
2500 // or if we observe _owner change from one non-null value to
2501 // another non-null value. In such cases we might abort
2502 // the spin without prejudice or apply a "penalty" to the
2503 // spin count-down variable "ctr", reducing it by 100, say.
2504
2505 Thread * ox = (Thread *) _owner ;
2506 if (ox == NULL) {
2507 ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
2508 if (ox == NULL) {
2509 // The CAS succeeded -- this thread acquired ownership
2510 // Take care of some bookkeeping to exit spin state.
2511 if (sss && _succ == Self) {
2512 _succ = NULL ;
2513 }
2514 if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
2515
2516 // Increase _SpinDuration :
2517 // The spin was successful (profitable) so we tend toward
2518 // longer spin attempts in the future.
2519 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2520 // If we acquired the lock early in the spin cycle it
2521 // makes sense to increase _SpinDuration proportionally.
2522 // Note that we don't clamp SpinDuration precisely at SpinLimit.
2523 int x = _SpinDuration ;
2524 if (x < Knob_SpinLimit) {
2525 if (x < Knob_Poverty) x = Knob_Poverty ;
2526 _SpinDuration = x + Knob_Bonus ;
2527 }
2528 return 1 ;
2529 }
2530
2531 // The CAS failed ... we can take any of the following actions:
2532 // * penalize: ctr -= Knob_CASPenalty
2533 // * exit spin with prejudice -- goto Abort;
2534 // * exit spin without prejudice.
2535 // * Since CAS is high-latency, retry again immediately.
2536 prv = ox ;
2537 TEVENT (Spin: cas failed) ;
2538 if (caspty == -2) break ;
2539 if (caspty == -1) goto Abort ;
2540 ctr -= caspty ;
2541 continue ;
2542 }
2543
2544 // Did lock ownership change hands ?
2545 if (ox != prv && prv != NULL ) {
2546 TEVENT (spin: Owner changed)
2547 if (oxpty == -2) break ;
2548 if (oxpty == -1) goto Abort ;
2549 ctr -= oxpty ;
2550 }
2551 prv = ox ;
2552
2553 // Abort the spin if the owner is not executing.
2554 // The owner must be executing in order to drop the lock.
2555 // Spinning while the owner is OFFPROC is idiocy.
2556 // Consider: ctr -= RunnablePenalty ;
2557 if (Knob_OState && NotRunnable (Self, ox)) {
2558 TEVENT (Spin abort - notrunnable);
2559 goto Abort ;
2560 }
2561 if (sss && _succ == NULL ) _succ = Self ;
2562 }
2563
2564 // Spin failed with prejudice -- reduce _SpinDuration.
2565 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2566 // AIMD is globally stable.
2567 TEVENT (Spin failure) ;
2568 {
2569 int x = _SpinDuration ;
2570 if (x > 0) {
2571 // Consider an AIMD scheme like: x -= (x >> 3) + 100
2572 // This is globally stable and tends to damp the response.
2573 x -= Knob_Penalty ;
2574 if (x < 0) x = 0 ;
2575 _SpinDuration = x ;
2576 }
2577 }
2578
2579 Abort:
2580 if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
2581 if (sss && _succ == Self) {
2582 _succ = NULL ;
2583 // Invariant: after setting succ=null a contending thread
2584 // must recheck-retry _owner before parking. This usually happens
2585 // in the normal usage of TrySpin(), but it's safest
2586 // to make TrySpin() as foolproof as possible.
2587 OrderAccess::fence() ;
2588 if (TryLock(Self) > 0) return 1 ;
2589 }
2590 return 0 ;
2591 }
2592
2593 #define TrySpin TrySpin_VaryDuration
2594
2595 static void DeferredInitialize () {
2596 if (InitDone > 0) return ;
2597 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2598 while (InitDone != 1) ;
2599 return ;
2600 }
2601
2602 // One-shot global initialization ...
2603 // The initialization is idempotent, so we don't need locks.
2604 // In the future consider doing this via os::init_2().
2605 // SyncKnobs consist of <Key>=<Value> pairs in the style
2606 // of environment variables. Start by converting ':' to NUL.
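// For example (hypothetical values): -XX:SyncKnobs=SpinLimit=4096:SpinBackOff=8
// becomes "SpinLimit=4096\0SpinBackOff=8\0\0" before the SETKNOB() lookups below.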
2607
2608 if (SyncKnobs == NULL) SyncKnobs = "" ;
2609
2610 size_t sz = strlen (SyncKnobs) ;
2611 char * knobs = (char *) malloc (sz + 2) ;
2612 if (knobs == NULL) {
2613 vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
2614 guarantee (0, "invariant") ;
2615 }
2616 strcpy (knobs, SyncKnobs) ;
2617 knobs[sz+1] = 0 ;
2618 for (char * p = knobs ; *p ; p++) {
2619 if (*p == ':') *p = 0 ;
2620 }
2621
2622 #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
2623 SETKNOB(ReportSettings) ;
2624 SETKNOB(Verbose) ;
2625 SETKNOB(FixedSpin) ;
2626 SETKNOB(SpinLimit) ;
2627 SETKNOB(SpinBase) ;
2628 SETKNOB(SpinBackOff);
2629 SETKNOB(CASPenalty) ;
2630 SETKNOB(OXPenalty) ;
2631 SETKNOB(LogSpins) ;
2632 SETKNOB(SpinSetSucc) ;
2633 SETKNOB(SuccEnabled) ;
2634 SETKNOB(SuccRestrict) ;
2635 SETKNOB(Penalty) ;
2636 SETKNOB(Bonus) ;
2637 SETKNOB(BonusB) ;
2638 SETKNOB(Poverty) ;
2639 SETKNOB(SpinAfterFutile) ;
2640 SETKNOB(UsePause) ;
2641 SETKNOB(SpinEarly) ;
2642 SETKNOB(OState) ;
2643 SETKNOB(MaxSpinners) ;
2644 SETKNOB(PreSpin) ;
2645 SETKNOB(ExitPolicy) ;
2646 SETKNOB(QMode);
2647 SETKNOB(ResetEvent) ;
2648 SETKNOB(MoveNotifyee) ;
2649 SETKNOB(FastHSSEC) ;
2650 #undef SETKNOB
2651
2652 if (os::is_MP()) {
2653 BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
2654 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
2655 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2656 } else {
2657 Knob_SpinLimit = 0 ;
2658 Knob_SpinBase = 0 ;
2659 Knob_PreSpin = 0 ;
2660 Knob_FixedSpin = -1 ;
2661 }
2662
2663 if (Knob_LogSpins == 0) {
2664 ObjectSynchronizer::_sync_FailedSpins = NULL ;
2665 }
2666
2667 free (knobs) ;
2668 OrderAccess::fence() ;
2669 InitDone = 1 ;
2670 }
2671
2672 // Theory of operations -- Monitors lists, thread residency, etc:
2673 //
2674 // * A thread acquires ownership of a monitor by successfully
2675 // CAS()ing the _owner field from null to non-null.
2676 //
2677 // * Invariant: A thread appears on at most one monitor list --
2678 // cxq, EntryList or WaitSet -- at any one time.
2679 //
2680 // * Contending threads "push" themselves onto the cxq with CAS
2681 // and then spin/park.
2682 //
2683 // * After a contending thread eventually acquires the lock it must
2684 // dequeue itself from either the EntryList or the cxq.
2685 //
2686 // * The exiting thread identifies and unparks an "heir presumptive"
2687 // tentative successor thread on the EntryList. Critically, the
2688 // exiting thread doesn't unlink the successor thread from the EntryList.
2689 // After having been unparked, the wakee will recontend for ownership of
2690 // the monitor. The successor (wakee) will either acquire the lock or
2691 // re-park itself.
2692 //
2693 // Succession is provided for by a policy of competitive handoff.
2694 // The exiting thread does _not_ grant or pass ownership to the
2695 // successor thread. (This is also referred to as "handoff" succession.)
2696 // Instead the exiting thread releases ownership and possibly wakes
2697 // a successor, so the successor can (re)compete for ownership of the lock.
2698 // If the EntryList is empty but the cxq is populated the exiting
2699 // thread will drain the cxq into the EntryList. It does so
2700 // by detaching the cxq (installing null with CAS) and folding
2701 // the threads from the cxq into the EntryList. The EntryList is
2702 // doubly linked, while the cxq is singly linked because of the
2703 // CAS-based "push" used to enqueue recently arrived threads (RATs).
2704 //
2705 // * Concurrency invariants:
2706 //
2707 // -- only the monitor owner may access or mutate the EntryList.
2708 // The mutex property of the monitor itself protects the EntryList
2709 // from concurrent interference.
2710 // -- Only the monitor owner may detach the cxq.
2711 //
2712 // * The monitor entry list operations avoid locks, but strictly speaking
2713 // they're not lock-free. Enter is lock-free, exit is not.
2714 // See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
2715 //
2716 // * The cxq can have multiple concurrent "pushers" but only one concurrent
2717 // detaching thread. This mechanism is immune to ABA corruption.
2718 // More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
2719 //
2720 // * Taken together, the cxq and the EntryList constitute or form a
2721 // single logical queue of threads stalled trying to acquire the lock.
2722 // We use two distinct lists to improve the odds of a constant-time
2723 // dequeue operation after acquisition (in the ::enter() epilog) and
2724 // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm).
2725 // A key desideratum is to minimize queue & monitor metadata manipulation
2726 // that occurs while holding the monitor lock -- that is, we want to
2727 // minimize monitor lock hold times. Note that even a small amount of
2728 // fixed spinning will greatly reduce the # of enqueue-dequeue operations
2729 // on EntryList|cxq. That is, spinning relieves contention on the "inner"
2730 // locks and monitor metadata.
2731 //
2732 // Cxq points to the set of Recently Arrived Threads attempting entry.
2733 // Because we push threads onto _cxq with CAS, the RATs must take the form of
2734 // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when
2735 // the unlocking thread notices that EntryList is null but _cxq is != null.
2736 //
2737 // The EntryList is ordered by the prevailing queue discipline and
2738 // can be organized in any convenient fashion, such as a doubly-linked list or
2739 // a circular doubly-linked list. Critically, we want insert and delete operations
2740 // to operate in constant-time. If we need a priority queue then something akin
2741 // to Solaris' sleepq would work nicely. Viz.,
2742 // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
2743 // Queue discipline is enforced at ::exit() time, when the unlocking thread
2744 // drains the cxq into the EntryList, and orders or reorders the threads on the
2745 // EntryList accordingly.
2746 //
2747 // Barring "lock barging", this mechanism provides fair cyclic ordering,
2748 // somewhat similar to an elevator-scan.
2749 //
2750 // * The monitor synchronization subsystem avoids the use of native
2751 // synchronization primitives except for the narrow platform-specific
2752 // park-unpark abstraction. See the comments in os_solaris.cpp regarding
2753 // the semantics of park-unpark. Put another way, this monitor implementation
2754 // depends only on atomic operations and park-unpark. The monitor subsystem
2755 // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
2756 // underlying OS manages the READY<->RUN transitions.
2757 //
2758 // * Waiting threads reside on the WaitSet list -- wait() puts
2759 // the caller onto the WaitSet.
2760 //
2761 // * notify() or notifyAll() simply transfers threads from the WaitSet to
2762 // either the EntryList or cxq. Subsequent exit() operations will
2763 // unpark the notifyee. Unparking a notifyee in notify() is inefficient -
2764 // it's likely the notifyee would simply impale itself on the lock held
2765 // by the notifier.
2766 //
2767 // * An interesting alternative is to encode cxq as (List,LockByte) where
2768 // the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary
2769 // variable, like _recursions, in the scheme. The threads or Events that form
2770 // the list would have to be aligned in 256-byte addresses. A thread would
2771 // try to acquire the lock or enqueue itself with CAS, but exiting threads
2772 // could use a 1-0 protocol and simply STB to set the LockByte to 0.
2773 // Note that this is *not* word-tearing, but it does presume that full-word
2774 // CAS operations are coherent when intermixed with STB operations. That's true
2775 // on most common processors.
2776 //
2777 // * See also http://blogs.sun.com/dave
2778
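
// Illustrative sketch (not HotSpot code): a stand-alone model of the two-queue
// shape described above -- contending threads CAS-push themselves onto a singly
// linked LIFO (cxq), and only the lock owner detaches that list and folds it
// into a doubly linked EntryList. std::atomic stands in for HotSpot's Atomic::
// wrappers; the WaiterNode type and function names are hypothetical.
#include <atomic>
#include <cstdio>

struct WaiterNode {
  int          id;
  WaiterNode * next;   // singly linked while on cxq
  WaiterNode * prev;   // back-link, used once folded into the EntryList
};

static std::atomic<WaiterNode *> cxq { nullptr };   // recently arrived threads (RATs)
static WaiterNode * EntryList = nullptr;            // owner-private, doubly linked

// Any contending thread may push itself; this mirrors the CAS-based "push".
static void PushSelf (WaiterNode * self) {
  WaiterNode * head = cxq.load();
  do {
    self->next = head;
  } while (!cxq.compare_exchange_weak(head, self));
}

// Only the monitor owner may call this: detach cxq with one atomic exchange,
// then prepend the drained nodes to the EntryList, adding back-links.
static void DrainCxqIntoEntryList () {
  WaiterNode * list = cxq.exchange(nullptr);
  while (list != nullptr) {
    WaiterNode * nxt = list->next;
    list->next = EntryList;
    list->prev = nullptr;
    if (EntryList != nullptr) EntryList->prev = list;
    EntryList = list;
    list = nxt;
  }
}

int main () {
  WaiterNode a { 1, nullptr, nullptr }, b { 2, nullptr, nullptr };
  PushSelf(&a);
  PushSelf(&b);                 // cxq is LIFO: b is now at the head
  DrainCxqIntoEntryList();
  for (WaiterNode * p = EntryList; p != nullptr; p = p->next) printf("%d ", p->id);
  printf("\n");                 // prints "1 2" in this single-threaded demo
  return 0;
}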
2779
2780 void ATTR ObjectMonitor::EnterI (TRAPS) {
2781 Thread * Self = THREAD ;
2782 assert (Self->is_Java_thread(), "invariant") ;
2783 assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
2784
2785 // Try the lock - TATAS
2786 if (TryLock (Self) > 0) {
2787 assert (_succ != Self , "invariant") ;
2788 assert (_owner == Self , "invariant") ;
2789 assert (_Responsible != Self , "invariant") ;
2790 return ;
2791 }
2792
2793 DeferredInitialize () ;
2794
2795 // We try one round of spinning *before* enqueueing Self.
2796 //
2797 // If the _owner is ready but OFFPROC we could use a YieldTo()
2798 // operation to donate the remainder of this thread's quantum
2799 // to the owner. This has subtle but beneficial affinity
2800 // effects.
2801
2802 if (TrySpin (Self) > 0) {
2803 assert (_owner == Self , "invariant") ;
2804 assert (_succ != Self , "invariant") ;
2805 assert (_Responsible != Self , "invariant") ;
2806 return ;
2807 }
2808
2809 // The Spin failed -- Enqueue and park the thread ...
2810 assert (_succ != Self , "invariant") ;
2811 assert (_owner != Self , "invariant") ;
2812 assert (_Responsible != Self , "invariant") ;
2813
2814 // Enqueue "Self" on ObjectMonitor's _cxq.
2815 //
2816 // Node acts as a proxy for Self.
2817 // As an aside, if we were ever to rewrite the synchronization code mostly
2818 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
2819 // Java objects. This would avoid awkward lifecycle and liveness issues,
2820 // as well as eliminate a subset of ABA issues.
2821 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
2822 //
2823
2824 ObjectWaiter node(Self) ;
2825 Self->_ParkEvent->reset() ;
2826 node._prev = (ObjectWaiter *) 0xBAD ;
2827 node.TState = ObjectWaiter::TS_CXQ ;
2828
2829 // Push "Self" onto the front of the _cxq.
2830 // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
2831 // Note that spinning tends to reduce the rate at which threads
2832 // enqueue and dequeue on EntryList|cxq.
2833 ObjectWaiter * nxt ;
2834 for (;;) {
2835 node._next = nxt = _cxq ;
2836 if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
2837
2838 // Interference - the CAS failed because _cxq changed. Just retry.
2839 // As an optional optimization we retry the lock.
2840 if (TryLock (Self) > 0) {
2841 assert (_succ != Self , "invariant") ;
2842 assert (_owner == Self , "invariant") ;
2843 assert (_Responsible != Self , "invariant") ;
2844 return ;
2845 }
2846 }
2847
2848 // Check for cxq|EntryList edge transition to non-null. This indicates
2849 // the onset of contention. While contention persists exiting threads
2850 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
2851 // operations revert to the faster 1-0 mode. This enter operation may interleave
2852 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
2853 // arrange for one of the contending threads to use a timed park() operation
2854 // to detect and recover from the race. (Stranding is a form of progress failure
2855 // where the monitor is unlocked but all the contending threads remain parked).
2856 // That is, at least one of the contended threads will periodically poll _owner.
2857 // One of the contending threads will become the designated "Responsible" thread.
2858 // The Responsible thread uses a timed park instead of a normal indefinite park
2859 // operation -- it periodically wakes and checks for and recovers from potential
2860 // strandings admitted by 1-0 exit operations. We need at most one Responsible
2861 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
2862 // be responsible for a monitor.
2863 //
2864 // Currently, one of the contending threads takes on the added role of "Responsible".
2865 // A viable alternative would be to use a dedicated "stranding checker" thread
2866 // that periodically iterated over all the threads (or active monitors) and unparked
2867 // successors where there was risk of stranding. This would help eliminate the
2868 // timer scalability issues we see on some platforms as we'd only have one thread
2869 // -- the checker -- parked on a timer.
2870
2871 if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
2872 // Try to assume the role of responsible thread for the monitor.
2873 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
2874 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
2875 }
2876
2877 // The lock might have been released while this thread was occupied queueing
2878 // itself onto _cxq. To close the race and avoid "stranding" and
2879 // progress-liveness failure we must resample-retry _owner before parking.
2880 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
2881 // In this case the ST-MEMBAR is accomplished with CAS().
2882 //
2883 // TODO: Defer all thread state transitions until park-time.
2884 // Since state transitions are heavy and inefficient we'd like
2885 // to defer the state transitions until absolutely necessary,
2886 // and in doing so avoid some transitions ...
2887
2888 TEVENT (Inflated enter - Contention) ;
2889 int nWakeups = 0 ;
2890 int RecheckInterval = 1 ;
2891
2892 for (;;) {
2893
2894 if (TryLock (Self) > 0) break ;
2895 assert (_owner != Self, "invariant") ;
2896
2897 if ((SyncFlags & 2) && _Responsible == NULL) {
2898 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
2899 }
2900
2901 // park self
2902 if (_Responsible == Self || (SyncFlags & 1)) {
2903 TEVENT (Inflated enter - park TIMED) ;
2904 Self->_ParkEvent->park ((jlong) RecheckInterval) ;
2905 // Increase the RecheckInterval, but clamp the value.
2906 RecheckInterval *= 8 ;
2907 if (RecheckInterval > 1000) RecheckInterval = 1000 ;
2908 } else {
2909 TEVENT (Inflated enter - park UNTIMED) ;
2910 Self->_ParkEvent->park() ;
2911 }
2912
2913 if (TryLock(Self) > 0) break ;
2914
2915 // The lock is still contested.
2916 // Keep a tally of the # of futile wakeups.
2917 // Note that the counter is not protected by a lock or updated by atomics.
2918 // That is by design - we trade "lossy" counters which are exposed to
2919 // races during updates for a lower probe effect.
2920 TEVENT (Inflated enter - Futile wakeup) ;
2921 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
2922 ObjectSynchronizer::_sync_FutileWakeups->inc() ;
2923 }
2924 ++ nWakeups ;
2925
2926 if (THREAD->is_Compiler_thread() && nWakeups >= 5) {
2927 assert(false, "Compiler thread blocked by lock");
2928 }
2929
2930 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
2931 // We can defer clearing _succ until after the spin completes
2932 // TrySpin() must tolerate being called with _succ == Self.
2933 // Try yet another round of adaptive spinning.
2934 if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
2935
2936 // We can find that we were unpark()ed and redesignated _succ while
2937 // we were spinning. That's harmless. If we iterate and call park(),
2938 // park() will consume the event and return immediately and we'll
2939 // just spin again. This pattern can repeat, leaving _succ to simply
2940 // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks().
2941 // Alternately, we can sample fired() here, and if set, forgo spinning
2942 // in the next iteration.
2943
2944 if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
2945 Self->_ParkEvent->reset() ;
2946 OrderAccess::fence() ;
2947 }
2948 if (_succ == Self) _succ = NULL ;
2949
2950 // Invariant: after clearing _succ a thread *must* retry _owner before parking.
2951 OrderAccess::fence() ;
2952 }
2953
2954 // Egress :
2955 // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
2956 // Normally we'll find Self on the EntryList .
2957 // From the perspective of the lock owner (this thread), the
2958 // EntryList is stable and cxq is prepend-only.
2959 // The head of cxq is volatile but the interior is stable.
2960 // In addition, Self.TState is stable.
2961
2962 assert (_owner == Self , "invariant") ;
2963 assert (object() != NULL , "invariant") ;
2964 // I'd like to write:
2965 // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
2966 // but as we're at a safepoint that's not safe.
2967
2968 UnlinkAfterAcquire (Self, &node) ;
2969 if (_succ == Self) _succ = NULL ;
2970
2971 assert (_succ != Self, "invariant") ;
2972 if (_Responsible == Self) {
2973 _Responsible = NULL ;
2974 // Dekker pivot-point.
2975 // Consider OrderAccess::storeload() here
2976
2977 // We may leave threads on cxq|EntryList without a designated
2978 // "Responsible" thread. This is benign. When this thread subsequently
2979 // exits the monitor it can "see" such preexisting "old" threads --
2980 // threads that arrived on the cxq|EntryList before the fence, above --
2981 // by LDing cxq|EntryList. Newly arrived threads -- that is, threads
2982 // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
2983 // non-null and elect a new "Responsible" timer thread.
2984 //
2985 // This thread executes:
2986 // ST Responsible=null; MEMBAR (in enter epilog - here)
2987 // LD cxq|EntryList (in subsequent exit)
2988 //
2989 // Entering threads in the slow/contended path execute:
2990 // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
2991 // The (ST cxq; MEMBAR) is accomplished with CAS().
2992 //
2993 // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
2994 // exit operation from floating above the ST Responsible=null.
2995 //
2996 // In *practice* however, EnterI() is always followed by some atomic
2997 // operation such as the decrement of _count in ::enter(). Those atomics
2998 // obviate the need for the explicit MEMBAR, above.
2999 }
3000
3001 // We've acquired ownership with CAS().
3002 // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
3003 // But since that CAS() this thread may also have stored into _succ,
3004 // EntryList, cxq or Responsible. These meta-data updates must be
3005 // visible *before* this thread subsequently drops the lock.
3006 // Consider what could occur if we didn't enforce this constraint --
3007 // STs to monitor meta-data and user-data could reorder with (become
3008 // visible after) the ST in exit that drops ownership of the lock.
3009 // Some other thread could then acquire the lock, but observe inconsistent
3010 // or old monitor meta-data and heap data. That violates the JMM.
3011 // To that end, the 1-0 exit() operation must have at least STST|LDST
3012 // "release" barrier semantics. Specifically, there must be at least a
3013 // STST|LDST barrier in exit() before the ST of null into _owner that drops
3014 // the lock. The barrier ensures that changes to monitor meta-data and data
3015 // protected by the lock will be visible before we release the lock, and
3016 // therefore before some other thread (CPU) has a chance to acquire the lock.
3017 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
3018 //
3019 // Critically, any prior STs to _succ or EntryList must be visible before
3020 // the ST of null into _owner in the *subsequent* (following) corresponding
3021 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
3022 // execute a serializing instruction.
3023
3024 if (SyncFlags & 8) {
3025 OrderAccess::fence() ;
3026 }
3027 return ;
3028 }
3029
3030 // ExitSuspendEquivalent:
3031 // A faster alternative to handle_special_suspend_equivalent_condition()
3032 //
3033 // handle_special_suspend_equivalent_condition() unconditionally
3034 // acquires the SR_lock. On some platforms uncontended MutexLocker()
3035 // operations have high latency. Note that in ::enter() we call HSSEC
3036 // while holding the monitor, so we effectively lengthen the critical sections.
3037 //
3038 // There are a number of possible solutions:
3039 //
3040 // A. To ameliorate the problem we might also defer state transitions
3041 // to as late as possible -- just prior to parking.
3042 // Given that, we'd call HSSEC after having returned from park(),
3043 // but before attempting to acquire the monitor. This is only a
3044 // partial solution. It avoids calling HSSEC while holding the
3045 // monitor (good), but it still increases successor reacquisition latency --
3046 // the interval between unparking a successor and the time the successor
3047 // resumes and retries the lock. See ReenterI(), which defers state transitions.
3048 // If we use this technique we can also avoid EnterI()-exit() loop
3049 // in ::enter() where we iteratively drop the lock and then attempt
3050 // to reacquire it after suspending.
3051 //
3052 // B. In the future we might fold all the suspend bits into a
3053 // composite per-thread suspend flag and then update it with CAS().
3054 // Alternately, a Dekker-like mechanism with multiple variables
3055 // would suffice:
3056 // ST Self->_suspend_equivalent = false
3057 // MEMBAR
3058 //     LD Self->_suspend_flags
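//
//    A hedged sketch of option (B), assuming a hypothetical composite
//    _suspend_state word and SUSPEND_EQUIVALENT bit, neither of which exists
//    in the current code:
//
//      intptr_t s = Self->_suspend_state ;
//      Atomic::cmpxchg_ptr ((void *)(s & ~SUSPEND_EQUIVALENT),
//                           &Self->_suspend_state, (void *)s) ;
//      // CAS is serializing, so no separate MEMBAR is needed before
//      // re-examining the external-suspend bits folded into the same word.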
3059 //
3060
3061
3062 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
3063 int Mode = Knob_FastHSSEC ;
3064 if (Mode && !jSelf->is_external_suspend()) {
3065 assert (jSelf->is_suspend_equivalent(), "invariant") ;
3066 jSelf->clear_suspend_equivalent() ;
3067 if (2 == Mode) OrderAccess::storeload() ;
3068 if (!jSelf->is_external_suspend()) return false ;
3069 // We raced a suspension -- fall thru into the slow path
3070 TEVENT (ExitSuspendEquivalent - raced) ;
3071 jSelf->set_suspend_equivalent() ;
3072 }
3073 return jSelf->handle_special_suspend_equivalent_condition() ;
3074 }
3075
3076
3077 // ReenterI() is a specialized inline form of the latter half of the
3078 // contended slow-path from EnterI(). We use ReenterI() only for
3079 // monitor reentry in wait().
3080 //
3081 // In the future we should reconcile EnterI() and ReenterI(), adding
3082 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
3083 // loop accordingly.
3084
3085 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
3086 assert (Self != NULL , "invariant") ;
3087 assert (SelfNode != NULL , "invariant") ;
3088 assert (SelfNode->_thread == Self , "invariant") ;
3089 assert (_waiters > 0 , "invariant") ;
3090 assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
3091 assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
3092 JavaThread * jt = (JavaThread *) Self ;
3093
3094 int nWakeups = 0 ;
3095 for (;;) {
3096 ObjectWaiter::TStates v = SelfNode->TState ;
3097 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
3098 assert (_owner != Self, "invariant") ;
3099
3100 if (TryLock (Self) > 0) break ;
3101 if (TrySpin (Self) > 0) break ;
3102
3103 TEVENT (Wait Reentry - parking) ;
3104
3105 // State transition wrappers around park() ...
3106 // ReenterI() wisely defers state transitions until
3107 // it's clear we must park the thread.
3108 {
3109 OSThreadContendState osts(Self->osthread());
3110 ThreadBlockInVM tbivm(jt);
3111
3112 // cleared by handle_special_suspend_equivalent_condition()
3113 // or java_suspend_self()
3114 jt->set_suspend_equivalent();
3115 if (SyncFlags & 1) {
3116 Self->_ParkEvent->park ((jlong)1000) ;
3117 } else {
3118 Self->_ParkEvent->park () ;
3119 }
3120
3121 // were we externally suspended while we were waiting?
3122 for (;;) {
3123 if (!ExitSuspendEquivalent (jt)) break ;
3124 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
3125 jt->java_suspend_self();
3126 jt->set_suspend_equivalent();
3127 }
3128 }
3129
3130 // Try again, but just so we distinguish between futile wakeups and
3131 // successful wakeups. The following test isn't algorithmically
3132 // necessary, but it helps us maintain sensible statistics.
3133 if (TryLock(Self) > 0) break ;
3134
3135 // The lock is still contested.
3136 // Keep a tally of the # of futile wakeups.
3137 // Note that the counter is not protected by a lock or updated by atomics.
3138 // That is by design - we trade "lossy" counters which are exposed to
3139 // races during updates for a lower probe effect.
3140 TEVENT (Wait Reentry - futile wakeup) ;
3141 ++ nWakeups ;
3142
3143 // Assuming this is not a spurious wakeup we'll normally
3144 // find that _succ == Self.
3145 if (_succ == Self) _succ = NULL ;
3146
3147 // Invariant: after clearing _succ a contending thread
3148 // *must* retry _owner before parking.
3149 OrderAccess::fence() ;
3150
3151 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
3152 ObjectSynchronizer::_sync_FutileWakeups->inc() ;
3153 }
3154 }
3155
3156 // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
3157 // Normally we'll find Self on the EntryList.
3158 // Unlinking from the EntryList is constant-time and atomic-free.
3159 // From the perspective of the lock owner (this thread), the
3160 // EntryList is stable and cxq is prepend-only.
3161 // The head of cxq is volatile but the interior is stable.
3162 // In addition, Self.TState is stable.
3163
3164 assert (_owner == Self, "invariant") ;
3165 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3166 UnlinkAfterAcquire (Self, SelfNode) ;
3167 if (_succ == Self) _succ = NULL ;
3168 assert (_succ != Self, "invariant") ;
3169 SelfNode->TState = ObjectWaiter::TS_RUN ;
3170 OrderAccess::fence() ; // see comments at the end of EnterI()
3171 }
3172
3173 bool ObjectMonitor::try_enter(Thread* THREAD) {
3174 if (THREAD != _owner) {
3175 if (THREAD->is_lock_owned ((address)_owner)) {
3176 assert(_recursions == 0, "internal state error");
3177 _owner = THREAD ;
3178 _recursions = 1 ;
3179 OwnerIsThread = 1 ;
3180 return true;
3181 }
3182 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3183 return false;
3184 }
3185 return true;
3186 } else {
3187 _recursions++;
3188 return true;
3189 }
3190 }
3191
3192 void ATTR ObjectMonitor::enter(TRAPS) {
3193 // The following code is ordered to check the most common cases first
3194 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
3195 Thread * const Self = THREAD ;
3196 void * cur ;
3197
3198 cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
3199 if (cur == NULL) {
3200 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
3201 assert (_recursions == 0 , "invariant") ;
3202 assert (_owner == Self, "invariant") ;
3203 // CONSIDER: set or assert OwnerIsThread == 1
3204 return ;
3205 }
3206
3207 if (cur == Self) {
3208 // TODO-FIXME: check for integer overflow! BUGID 6557169.
3209 _recursions ++ ;
3210 return ;
3211 }
3212
3213 if (Self->is_lock_owned ((address)cur)) {
3214 assert (_recursions == 0, "internal state error");
3215 _recursions = 1 ;
3216 // Commute owner from a thread-specific on-stack BasicLockObject address to
3217 // a full-fledged "Thread *".
3218 _owner = Self ;
3219 OwnerIsThread = 1 ;
3220 return ;
3221 }
3222
3223 // We've encountered genuine contention.
3224 assert (Self->_Stalled == 0, "invariant") ;
3225 Self->_Stalled = intptr_t(this) ;
3226
3227 // Try one round of spinning *before* enqueueing Self
3228 // and before going through the awkward and expensive state
3229 // transitions. The following spin is strictly optional ...
3230 // Note that if we acquire the monitor from an initial spin
3231 // we forgo posting JVMTI events and firing DTRACE probes.
3232 if (Knob_SpinEarly && TrySpin (Self) > 0) {
3233 assert (_owner == Self , "invariant") ;
3234 assert (_recursions == 0 , "invariant") ;
3235 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3236 Self->_Stalled = 0 ;
3237 return ;
3238 }
3239
3240 assert (_owner != Self , "invariant") ;
3241 assert (_succ != Self , "invariant") ;
3242 assert (Self->is_Java_thread() , "invariant") ;
3243 JavaThread * jt = (JavaThread *) Self ;
3244 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
3245 assert (jt->thread_state() != _thread_blocked , "invariant") ;
3246 assert (this->object() != NULL , "invariant") ;
3247 assert (_count >= 0, "invariant") ;
3248
3249 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
3250 // Ensure the object-monitor relationship remains stable while there's contention.
3251 Atomic::inc_ptr(&_count);
3252
3253 { // Change java thread status to indicate blocked on monitor enter.
3254 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
3255
3256 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
3257 if (JvmtiExport::should_post_monitor_contended_enter()) {
3258 JvmtiExport::post_monitor_contended_enter(jt, this);
3259 }
3260
3261 OSThreadContendState osts(Self->osthread());
3262 ThreadBlockInVM tbivm(jt);
3263
3264 Self->set_current_pending_monitor(this);
3265
3266 // TODO-FIXME: change the following for(;;) loop to straight-line code.
3267 for (;;) {
3268 jt->set_suspend_equivalent();
3269 // cleared by handle_special_suspend_equivalent_condition()
3270 // or java_suspend_self()
3271
3272 EnterI (THREAD) ;
3273
3274 if (!ExitSuspendEquivalent(jt)) break ;
3275
3276 //
3277 // We have acquired the contended monitor, but while we were
3278 // waiting another thread suspended us. We don't want to enter
3279 // the monitor while suspended because that would surprise the
3280 // thread that suspended us.
3281 //
3282 _recursions = 0 ;
3283 _succ = NULL ;
3284 exit (Self) ;
3285
3286 jt->java_suspend_self();
3287 }
3288 Self->set_current_pending_monitor(NULL);
3289 }
3290
3291 Atomic::dec_ptr(&_count);
3292 assert (_count >= 0, "invariant") ;
3293 Self->_Stalled = 0 ;
3294
3295 // Must either set _recursions = 0 or ASSERT _recursions == 0.
3296 assert (_recursions == 0 , "invariant") ;
3297 assert (_owner == Self , "invariant") ;
3298 assert (_succ != Self , "invariant") ;
3299 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3300
3301 // The thread -- now the owner -- is back in vm mode.
3302 // Report the glorious news via TI,DTrace and jvmstat.
3303 // The probe effect is non-trivial. All the reportage occurs
3304 // while we hold the monitor, increasing the length of the critical
3305 // section. Amdahl's parallel speedup law comes vividly into play.
3306 //
3307 // Another option might be to aggregate the events (thread local or
3308 // per-monitor aggregation) and defer reporting until a more opportune
3309 // time -- such as next time some thread encounters contention but has
3310 //   yet to acquire the lock.  While that thread is spinning we could
3311 //   increment JVMStat counters, etc.
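//
//   A hedged sketch of such deferral, using a hypothetical thread-local
//   tally (no such field exists in the current Thread class):
//
//     Self->_DeferredContendedEntered ++ ;      // no probe fired while we hold the lock
//     ...
//     // later, e.g. while spinning on some other contended monitor:
//     if (Self->_DeferredContendedEntered != 0) {
//       ObjectSynchronizer::_sync_ContendedLockAttempts->inc (Self->_DeferredContendedEntered) ;
//       Self->_DeferredContendedEntered = 0 ;
//     }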
3312
3313 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
3314 if (JvmtiExport::should_post_monitor_contended_entered()) {
3315 JvmtiExport::post_monitor_contended_entered(jt, this);
3316 }
3317 if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
3318 ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
3319 }
3320 }
3321
3322 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
3323 assert (_owner == Self, "invariant") ;
3324
3325 // Exit protocol:
3326 // 1. ST _succ = wakee
3327 // 2. membar #loadstore|#storestore;
3328   //   3. ST _owner = NULL
3329   //   4. unpark(wakee)
3330
3331 _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
3332 ParkEvent * Trigger = Wakee->_event ;
3333
3334 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
3335 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
3336 // out-of-scope (non-extant).
3337 Wakee = NULL ;
3338
3339 // Drop the lock
3340 OrderAccess::release_store_ptr (&_owner, NULL) ;
3341 OrderAccess::fence() ; // ST _owner vs LD in unpark()
3342
3343 // TODO-FIXME:
3344 // If there's a safepoint pending the best policy would be to
3345 // get _this thread to a safepoint and only wake the successor
3346 // after the safepoint completed. monitorexit uses a "leaf"
3347 // state transition, however, so this thread can't become
3348 // safe at this point in time. (Its stack isn't walkable).
3349 // The next best thing is to defer waking the successor by
3350   //   adding it to a list of threads to be unparked at the
3351   //   end of the forthcoming STW.
3352 if (SafepointSynchronize::do_call_back()) {
3353 TEVENT (unpark before SAFEPOINT) ;
3354 }
3355
3356 // Possible optimizations ...
3357 //
3358 // * Consider: set Wakee->UnparkTime = timeNow()
3359 // When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
3360 // By measuring recent ONPROC latency we can approximate the
3361 // system load. In turn, we can feed that information back
3362 // into the spinning & succession policies.
3363 // (ONPROC latency correlates strongly with load).
3364 //
3365 // * Pull affinity:
3366   //     If the wakee is cold then transiently setting its affinity
3367 // to the current CPU is a good idea.
3368 // See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
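  //
  //   A hedged sketch of the UnparkTime idea, assuming a hypothetical
  //   _UnparkTime field on the ParkEvent (none exists today):
  //
  //     Trigger->_UnparkTime = os::javaTimeNanos() ;    // here, just before unpark()
  //     ...
  //     // in the wakee, immediately after park() returns:
  //     jlong ONPROC = os::javaTimeNanos() - Self->_ParkEvent->_UnparkTime ;
  //     // feed ONPROC into the adaptive spin and succession policies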
3369 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
3370 Trigger->unpark() ;
3371
3372 // Maintain stats and report events to JVMTI
3373 if (ObjectSynchronizer::_sync_Parks != NULL) {
3374 ObjectSynchronizer::_sync_Parks->inc() ;
3375 }
3376 }
3377
3378
3379 // exit()
3380 // ~~~~~~
3381 // Note that the collector can't reclaim the objectMonitor or deflate
3382 // the object out from underneath the thread calling ::exit() as the
3383 // thread calling ::exit() never transitions to a stable state.
3384 // This inhibits GC, which in turn inhibits asynchronous (and
3385 // inopportune) reclamation of "this".
3386 //
3387 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
3388 // There's one exception to the claim above, however. EnterI() can call
3389 // exit() to drop a lock if the acquirer has been externally suspended.
3390 // In that case exit() is called with _thread_state as _thread_blocked,
3391 // but the monitor's _count field is > 0, which inhibits reclamation.
3392 //
3393 // 1-0 exit
3394 // ~~~~~~~~
3395 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
3396 // the fast-path operators have been optimized so the common ::exit()
3397 // operation is 1-0. See i486.ad fast_unlock(), for instance.
3398 // The code emitted by fast_unlock() elides the usual MEMBAR. This
3399 // greatly improves latency -- MEMBAR and CAS having considerable local
3400 // latency on modern processors -- but at the cost of "stranding". Absent the
3401 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
3402 // ::enter() path, resulting in the entering thread being stranded
3403 // and a progress-liveness failure. Stranding is extremely rare.
3404 // We use timers (timed park operations) & periodic polling to detect
3405 // and recover from stranding. Potentially stranded threads periodically
3406 // wake up and poll the lock. See the usage of the _Responsible variable.
3407 //
3408 // The CAS() in enter provides for safety and exclusion, while the CAS or
3409 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
3410 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
3411 // We detect and recover from stranding with timers.
3412 //
3413 // If a thread transiently strands it'll park until (a) another
3414 // thread acquires the lock and then drops the lock, at which time the
3415 // exiting thread will notice and unpark the stranded thread, or, (b)
3416 // the timer expires. If the lock is high traffic then the stranding latency
3417 // will be low due to (a). If the lock is low traffic then the odds of
3418 // stranding are lower, although the worst-case stranding latency
3419 // is longer. Critically, we don't want to put excessive load in the
3420 // platform's timer subsystem. We want to minimize both the timer injection
3421 // rate (timers created/sec) as well as the number of timers active at
3422 // any one time. (more precisely, we want to minimize timer-seconds, which is
3423 // the integral of the # of active timers at any instant over time).
3424 // Both impinge on OS scalability. Given that, at most one thread parked on
3425 // a monitor will use a timer.
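//
// For illustration, the recovery used by the (at most one) _Responsible thread
// amounts to a timed-park poll loop -- a sketch, not a verbatim excerpt of EnterI():
//
//   while (TryLock (Self) <= 0) {
//     Self->_ParkEvent->park ((jlong) RecheckInterval) ;   // bounded park
//     // On timeout we re-poll _owner.  If the exiting thread stranded us we
//     // observe the free lock here and recover, at the cost of the timeout latency.
//   }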
3426
3427 void ATTR ObjectMonitor::exit(TRAPS) {
3428 Thread * Self = THREAD ;
3429 if (THREAD != _owner) {
3430 if (THREAD->is_lock_owned((address) _owner)) {
3431 // Transmute _owner from a BasicLock pointer to a Thread address.
3432 // We don't need to hold _mutex for this transition.
3433 // Non-null to Non-null is safe as long as all readers can
3434 // tolerate either flavor.
3435 assert (_recursions == 0, "invariant") ;
3436 _owner = THREAD ;
3437 _recursions = 0 ;
3438 OwnerIsThread = 1 ;
3439 } else {
3440 // NOTE: we need to handle unbalanced monitor enter/exit
3441 // in native code by throwing an exception.
3442 // TODO: Throw an IllegalMonitorStateException ?
3443 TEVENT (Exit - Throw IMSX) ;
3444 assert(false, "Non-balanced monitor enter/exit!");
3445 if (false) {
3446 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
3447 }
3448 return;
3449 }
3450 }
3451
3452 if (_recursions != 0) {
3453 _recursions--; // this is simple recursive enter
3454 TEVENT (Inflated exit - recursive) ;
3455 return ;
3456 }
3457
3458    // Invariant: after setting Responsible=null a thread must execute
3459 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
3460 if ((SyncFlags & 4) == 0) {
3461 _Responsible = NULL ;
3462 }
3463
3464 for (;;) {
3465 assert (THREAD == _owner, "invariant") ;
3466
3467 // Fast-path monitor exit:
3468 //
3469 // Observe the Dekker/Lamport duality:
3470 // A thread in ::exit() executes:
3471 // ST Owner=null; MEMBAR; LD EntryList|cxq.
3472 // A thread in the contended ::enter() path executes the complementary:
3473 // ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
3474 //
3475 // Note that there's a benign race in the exit path. We can drop the
3476 // lock, another thread can reacquire the lock immediately, and we can
3477 // then wake a thread unnecessarily (yet another flavor of futile wakeup).
3478 // This is benign, and we've structured the code so the windows are short
3479 // and the frequency of such futile wakeups is low.
3480 //
3481 // We could eliminate the race by encoding both the "LOCKED" state and
3482    // the queue head in a single word.  Exit would then use a CAS to
3483 // clear the LOCKED bit/byte. This precludes the desirable 1-0 optimization,
3484 // however.
3485 //
3486 // Possible fast-path ::exit() optimization:
3487 // The current fast-path exit implementation fetches both cxq and EntryList.
3488 // See also i486.ad fast_unlock(). Testing has shown that two LDs
3489    // aren't measurably slower than a single LD on any platform.
3490 // Still, we could reduce the 2 LDs to one or zero by one of the following:
3491 //
3492 // - Use _count instead of cxq|EntryList
3493 // We intend to eliminate _count, however, when we switch
3494 // to on-the-fly deflation in ::exit() as is used in
3495 // Metalocks and RelaxedLocks.
3496 //
3497 // - Establish the invariant that cxq == null implies EntryList == null.
3498 // set cxq == EMPTY (1) to encode the state where cxq is empty
3499    //     but EntryList != null.  EMPTY is a distinguished value.
3500 // The fast-path exit() would fetch cxq but not EntryList.
3501 //
3502 // - Encode succ as follows:
3503 // succ = t : Thread t is the successor -- t is ready or is spinning.
3504 // Exiting thread does not need to wake a successor.
3505 // succ = 0 : No successor required -> (EntryList|cxq) == null
3506 // Exiting thread does not need to wake a successor
3507 // succ = 1 : Successor required -> (EntryList|cxq) != null and
3508 // logically succ == null.
3509 // Exiting thread must wake a successor.
3510 //
3511 // The 1-1 fast-exit path would appear as :
3512 // _owner = null ; membar ;
3513 // if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
3514 // goto FastPathDone ;
3515 //
3516 // and the 1-0 fast-exit path would appear as:
3517 // if (_succ == 1) goto SlowPath
3518 // Owner = null ;
3519 // goto FastPathDone
3520 //
3521 // - Encode the LSB of _owner as 1 to indicate that exit()
3522 // must use the slow-path and make a successor ready.
3523 // (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
3524    //     (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
3525 // The 1-0 fast exit path would read:
3526 // if (_owner != Self) goto SlowPath
3527 // _owner = null
3528 // goto FastPathDone
3529
3530 if (Knob_ExitPolicy == 0) {
3531 // release semantics: prior loads and stores from within the critical section
3532 // must not float (reorder) past the following store that drops the lock.
3533 // On SPARC that requires MEMBAR #loadstore|#storestore.
3534 // But of course in TSO #loadstore|#storestore is not required.
3535 // I'd like to write one of the following:
3536 // A. OrderAccess::release() ; _owner = NULL
3537 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
3538 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
3539 // store into a _dummy variable. That store is not needed, but can result
3540 // in massive wasteful coherency traffic on classic SMP systems.
3541 // Instead, I use release_store(), which is implemented as just a simple
3542 // ST on x64, x86 and SPARC.
3543 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
3544 OrderAccess::storeload() ; // See if we need to wake a successor
3545 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3546 TEVENT (Inflated exit - simple egress) ;
3547 return ;
3548 }
3549 TEVENT (Inflated exit - complex egress) ;
3550
3551 // Normally the exiting thread is responsible for ensuring succession,
3552 // but if other successors are ready or other entering threads are spinning
3553 // then this thread can simply store NULL into _owner and exit without
3554 // waking a successor. The existence of spinners or ready successors
3555 // guarantees proper succession (liveness). Responsibility passes to the
3556 // ready or running successors. The exiting thread delegates the duty.
3557 // More precisely, if a successor already exists this thread is absolved
3558 // of the responsibility of waking (unparking) one.
3559 //
3560 // The _succ variable is critical to reducing futile wakeup frequency.
3561 // _succ identifies the "heir presumptive" thread that has been made
3562 // ready (unparked) but that has not yet run. We need only one such
3563 // successor thread to guarantee progress.
3564 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
3565 // section 3.3 "Futile Wakeup Throttling" for details.
3566 //
3567 // Note that spinners in Enter() also set _succ non-null.
3568 // In the current implementation spinners opportunistically set
3569 // _succ so that exiting threads might avoid waking a successor.
3570 // Another less appealing alternative would be for the exiting thread
3571 // to drop the lock and then spin briefly to see if a spinner managed
3572 // to acquire the lock. If so, the exiting thread could exit
3573 // immediately without waking a successor, otherwise the exiting
3574 // thread would need to dequeue and wake a successor.
3575 // (Note that we'd need to make the post-drop spin short, but no
3576 // shorter than the worst-case round-trip cache-line migration time.
3577 // The dropped lock needs to become visible to the spinner, and then
3578 // the acquisition of the lock by the spinner must become visible to
3579 // the exiting thread).
3580 //
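      //
      // A hedged sketch of that post-drop spin alternative (not what the code
      // below does); SpinLimit is purely illustrative:
      //
      //   OrderAccess::release_store_ptr (&_owner, NULL) ;    // drop the lock
      //   for (int i = 0 ; i < SpinLimit ; i++) {
      //     if (_owner != NULL) return ;     // a spinner took the lock -- no wakeup needed
      //     SpinPause () ;
      //   }
      //   // Otherwise fall through: reacquire the lock and dequeue/wake a successor.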
3581
3582 // It appears that an heir-presumptive (successor) must be made ready.
3583 // Only the current lock owner can manipulate the EntryList or
3584 // drain _cxq, so we need to reacquire the lock. If we fail
3585 // to reacquire the lock the responsibility for ensuring succession
3586 // falls to the new owner.
3587 //
3588 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3589 return ;
3590 }
3591 TEVENT (Exit - Reacquired) ;
3592 } else {
3593 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3594 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
3595 OrderAccess::storeload() ;
3596 // Ratify the previously observed values.
3597 if (_cxq == NULL || _succ != NULL) {
3598 TEVENT (Inflated exit - simple egress) ;
3599 return ;
3600 }
3601
3602 // inopportune interleaving -- the exiting thread (this thread)
3603 // in the fast-exit path raced an entering thread in the slow-enter
3604 // path.
3605 // We have two choices:
3606 // A. Try to reacquire the lock.
3607 // If the CAS() fails return immediately, otherwise
3608 // we either restart/rerun the exit operation, or simply
3609 // fall-through into the code below which wakes a successor.
3610 // B. If the elements forming the EntryList|cxq are TSM
3611 // we could simply unpark() the lead thread and return
3612 // without having set _succ.
3613 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3614 TEVENT (Inflated exit - reacquired succeeded) ;
3615 return ;
3616 }
3617 TEVENT (Inflated exit - reacquired failed) ;
3618 } else {
3619 TEVENT (Inflated exit - complex egress) ;
3620 }
3621 }
3622
3623 guarantee (_owner == THREAD, "invariant") ;
3624
3625 // Select an appropriate successor ("heir presumptive") from the EntryList
3626 // and make it ready. Generally we just wake the head of EntryList .
3627 // There's no algorithmic constraint that we use the head - it's just
3628 // a policy decision. Note that the thread at head of the EntryList
3629 // remains at the head until it acquires the lock. This means we'll
3630 // repeatedly wake the same thread until it manages to grab the lock.
3631 // This is generally a good policy - if we're seeing lots of futile wakeups
3632    // at least we're waking/rewaking a thread that's likely to be hot or warm
3633 // (have residual D$ and TLB affinity).
3634 //
3635 // "Wakeup locality" optimization:
3636 // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
3637 // In the future we'll try to bias the selection mechanism
3638 // to preferentially pick a thread that recently ran on
3639 // a processor element that shares cache with the CPU on which
3640 // the exiting thread is running. We need access to Solaris'
3641 // schedctl.sc_cpu to make that work.
3642 //
3643 ObjectWaiter * w = NULL ;
3644 int QMode = Knob_QMode ;
3645
3646 if (QMode == 2 && _cxq != NULL) {
3647 // QMode == 2 : cxq has precedence over EntryList.
3648 // Try to directly wake a successor from the cxq.
3649 // If successful, the successor will need to unlink itself from cxq.
3650 w = _cxq ;
3651 assert (w != NULL, "invariant") ;
3652 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3653 ExitEpilog (Self, w) ;
3654 return ;
3655 }
3656
3657 if (QMode == 3 && _cxq != NULL) {
3658 // Aggressively drain cxq into EntryList at the first opportunity.
3659       // This policy ensures that recently-run threads live at the head of EntryList.
3660 // Drain _cxq into EntryList - bulk transfer.
3661 // First, detach _cxq.
3662 // The following loop is tantamount to: w = swap (&cxq, NULL)
3663 w = _cxq ;
3664 for (;;) {
3665 assert (w != NULL, "Invariant") ;
3666 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3667 if (u == w) break ;
3668 w = u ;
3669 }
3670 assert (w != NULL , "invariant") ;
3671
3672 ObjectWaiter * q = NULL ;
3673 ObjectWaiter * p ;
3674 for (p = w ; p != NULL ; p = p->_next) {
3675 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3676 p->TState = ObjectWaiter::TS_ENTER ;
3677 p->_prev = q ;
3678 q = p ;
3679 }
3680
3681 // Append the RATs to the EntryList
3682 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
3683 ObjectWaiter * Tail ;
3684 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
3685 if (Tail == NULL) {
3686 _EntryList = w ;
3687 } else {
3688 Tail->_next = w ;
3689 w->_prev = Tail ;
3690 }
3691
3692 // Fall thru into code that tries to wake a successor from EntryList
3693 }
3694
3695 if (QMode == 4 && _cxq != NULL) {
3696 // Aggressively drain cxq into EntryList at the first opportunity.
3697       // This policy ensures that recently-run threads live at the head of EntryList.
3698
3699 // Drain _cxq into EntryList - bulk transfer.
3700 // First, detach _cxq.
3701 // The following loop is tantamount to: w = swap (&cxq, NULL)
3702 w = _cxq ;
3703 for (;;) {
3704 assert (w != NULL, "Invariant") ;
3705 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3706 if (u == w) break ;
3707 w = u ;
3708 }
3709 assert (w != NULL , "invariant") ;
3710
3711 ObjectWaiter * q = NULL ;
3712 ObjectWaiter * p ;
3713 for (p = w ; p != NULL ; p = p->_next) {
3714 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3715 p->TState = ObjectWaiter::TS_ENTER ;
3716 p->_prev = q ;
3717 q = p ;
3718 }
3719
3720 // Prepend the RATs to the EntryList
3721 if (_EntryList != NULL) {
3722 q->_next = _EntryList ;
3723 _EntryList->_prev = q ;
3724 }
3725 _EntryList = w ;
3726
3727 // Fall thru into code that tries to wake a successor from EntryList
3728 }
3729
3730 w = _EntryList ;
3731 if (w != NULL) {
3732 // I'd like to write: guarantee (w->_thread != Self).
3733 // But in practice an exiting thread may find itself on the EntryList.
3734       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
3735       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
3736       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
3737       // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
3738       // releases the lock "O".  T2 resumes immediately after the ST of null into
3739 // _owner, above. T2 notices that the EntryList is populated, so it
3740 // reacquires the lock and then finds itself on the EntryList.
3741 // Given all that, we have to tolerate the circumstance where "w" is
3742 // associated with Self.
3743 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
3744 ExitEpilog (Self, w) ;
3745 return ;
3746 }
3747
3748 // If we find that both _cxq and EntryList are null then just
3749 // re-run the exit protocol from the top.
3750 w = _cxq ;
3751 if (w == NULL) continue ;
3752
3753 // Drain _cxq into EntryList - bulk transfer.
3754 // First, detach _cxq.
3755 // The following loop is tantamount to: w = swap (&cxq, NULL)
3756 for (;;) {
3757 assert (w != NULL, "Invariant") ;
3758 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3759 if (u == w) break ;
3760 w = u ;
3761 }
3762 TEVENT (Inflated exit - drain cxq into EntryList) ;
3763
3764 assert (w != NULL , "invariant") ;
3765 assert (_EntryList == NULL , "invariant") ;
3766
3767 // Convert the LIFO SLL anchored by _cxq into a DLL.
3768 // The list reorganization step operates in O(LENGTH(w)) time.
3769 // It's critical that this step operate quickly as
3770 // "Self" still holds the outer-lock, restricting parallelism
3771 // and effectively lengthening the critical section.
3772 // Invariant: s chases t chases u.
3773 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
3774 // we have faster access to the tail.
3775
3776 if (QMode == 1) {
3777 // QMode == 1 : drain cxq to EntryList, reversing order
3778 // We also reverse the order of the list.
3779 ObjectWaiter * s = NULL ;
3780 ObjectWaiter * t = w ;
3781 ObjectWaiter * u = NULL ;
3782 while (t != NULL) {
3783 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
3784 t->TState = ObjectWaiter::TS_ENTER ;
3785 u = t->_next ;
3786 t->_prev = u ;
3787 t->_next = s ;
3788 s = t;
3789 t = u ;
3790 }
3791 _EntryList = s ;
3792 assert (s != NULL, "invariant") ;
3793 } else {
3794 // QMode == 0 or QMode == 2
3795 _EntryList = w ;
3796 ObjectWaiter * q = NULL ;
3797 ObjectWaiter * p ;
3798 for (p = w ; p != NULL ; p = p->_next) {
3799 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3800 p->TState = ObjectWaiter::TS_ENTER ;
3801 p->_prev = q ;
3802 q = p ;
3803 }
3804 }
3805
3806 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
3807 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
3808
3809 // See if we can abdicate to a spinner instead of waking a thread.
3810 // A primary goal of the implementation is to reduce the
3811 // context-switch rate.
3812 if (_succ != NULL) continue;
3813
3814 w = _EntryList ;
3815 if (w != NULL) {
3816 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
3817 ExitEpilog (Self, w) ;
3818 return ;
3819 }
3820 }
3821 }
3822 // complete_exit exits a lock, returning the recursion count
3823 // complete_exit/reenter operate as a wait without waiting
3824 // complete_exit requires an inflated monitor
3825 // The _owner field is not always the Thread addr even with an
3826 // inflated monitor, e.g. the monitor can be inflated by a non-owning
3827 // thread due to contention.
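//
// A hedged usage sketch of the pairing (caller and context are illustrative only):
//
//   intptr_t rec = monitor->complete_exit (THREAD) ;   // fully release, remember recursions
//   ... perform work that must not be done while holding the monitor ...
//   monitor->reenter (rec, THREAD) ;                   // reacquire and restore recursions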
3828 intptr_t ObjectMonitor::complete_exit(TRAPS) {
3829 Thread * const Self = THREAD;
3830 assert(Self->is_Java_thread(), "Must be Java thread!");
3831 JavaThread *jt = (JavaThread *)THREAD;
3832
3833 DeferredInitialize();
3834
3835 if (THREAD != _owner) {
3836 if (THREAD->is_lock_owned ((address)_owner)) {
3837 assert(_recursions == 0, "internal state error");
3838 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
3839 _recursions = 0 ;
3840 OwnerIsThread = 1 ;
3841 }
3842 }
3843
3844 guarantee(Self == _owner, "complete_exit not owner");
3845 intptr_t save = _recursions; // record the old recursion count
3846 _recursions = 0; // set the recursion level to be 0
3847 exit (Self) ; // exit the monitor
3848 guarantee (_owner != Self, "invariant");
3849 return save;
3850 }
3851
3852 // reenter() enters a lock and sets recursion count
3853 // complete_exit/reenter operate as a wait without waiting
3854 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
3855 Thread * const Self = THREAD;
3856 assert(Self->is_Java_thread(), "Must be Java thread!");
3857 JavaThread *jt = (JavaThread *)THREAD;
3858
3859 guarantee(_owner != Self, "reenter already owner");
3860 enter (THREAD); // enter the monitor
3861 guarantee (_recursions == 0, "reenter recursion");
3862 _recursions = recursions;
3863 return;
3864 }
3865
3866 // Note: a subset of changes to ObjectMonitor::wait()
3867 // will need to be replicated in complete_exit above
3868 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
3869 Thread * const Self = THREAD ;
3870 assert(Self->is_Java_thread(), "Must be Java thread!");
3871 JavaThread *jt = (JavaThread *)THREAD;
3872
3873 DeferredInitialize () ;
3874
3875 // Throw IMSX or IEX.
3876 CHECK_OWNER();
3877
3878 // check for a pending interrupt
3879 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
3880 // post monitor waited event. Note that this is past-tense, we are done waiting.
3881 if (JvmtiExport::should_post_monitor_waited()) {
3882       // Note: 'false' parameter is passed here because the
3883       // wait did not time out -- it ended due to a thread interrupt.
3884 JvmtiExport::post_monitor_waited(jt, this, false);
3885 }
3886 TEVENT (Wait - Throw IEX) ;
3887 THROW(vmSymbols::java_lang_InterruptedException());
3888 return ;
3889 }
3890 TEVENT (Wait) ;
3891
3892 assert (Self->_Stalled == 0, "invariant") ;
3893 Self->_Stalled = intptr_t(this) ;
3894 jt->set_current_waiting_monitor(this);
3895
3896 // create a node to be put into the queue
3897 // Critically, after we reset() the event but prior to park(), we must check
3898 // for a pending interrupt.
3899 ObjectWaiter node(Self);
3900 node.TState = ObjectWaiter::TS_WAIT ;
3901 Self->_ParkEvent->reset() ;
3902 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
3903
3904 // Enter the waiting queue, which is a circular doubly linked list in this case
3905 // but it could be a priority queue or any data structure.
3906 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
3907 // by the owner of the monitor *except* in the case where park()
3908 // returns because of a timeout or interrupt.  Contention is exceptionally rare
3909 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
3910
3911 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
3912 AddWaiter (&node) ;
3913 Thread::SpinRelease (&_WaitSetLock) ;
3914
3915 if ((SyncFlags & 4) == 0) {
3916 _Responsible = NULL ;
3917 }
3918 intptr_t save = _recursions; // record the old recursion count
3919 _waiters++; // increment the number of waiters
3920    _recursions = 0;             // set the recursion level to be 0
3921 exit (Self) ; // exit the monitor
3922 guarantee (_owner != Self, "invariant") ;
3923
3924 // As soon as the ObjectMonitor's ownership is dropped in the exit()
3925 // call above, another thread can enter() the ObjectMonitor, do the
3926 // notify(), and exit() the ObjectMonitor. If the other thread's
3927 // exit() call chooses this thread as the successor and the unpark()
3928 // call happens to occur while this thread is posting a
3929 // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
3930 // handler using RawMonitors and consuming the unpark().
3931 //
3932 // To avoid the problem, we re-post the event. This does no harm
3933 // even if the original unpark() was not consumed because we are the
3934 // chosen successor for this monitor.
3935 if (node._notified != 0 && _succ == Self) {
3936 node._event->unpark();
3937 }
3938
3939 // The thread is on the WaitSet list - now park() it.
3940 // On MP systems it's conceivable that a brief spin before we park
3941 // could be profitable.
3942 //
3943 // TODO-FIXME: change the following logic to a loop of the form
3944 // while (!timeout && !interrupted && _notified == 0) park()
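      //
      // A hedged sketch of that loop shape (not the code that follows):
      //
      //   while (!interrupted && node._notified == 0) {
      //     if (millis <= 0) { Self->_ParkEvent->park () ; }
      //     else { ret = Self->_ParkEvent->park (millis) ; if (ret == OS_TIMEOUT) break ; }
      //     interrupted = interruptible && Thread::is_interrupted (THREAD, false) ;
      //   }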
3945
3946 int ret = OS_OK ;
3947 int WasNotified = 0 ;
3948 { // State transition wrappers
3949 OSThread* osthread = Self->osthread();
3950 OSThreadWaitState osts(osthread, true);
3951 {
3952 ThreadBlockInVM tbivm(jt);
3953 // Thread is in thread_blocked state and oop access is unsafe.
3954 jt->set_suspend_equivalent();
3955
3956 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
3957 // Intentionally empty
3958 } else
3959 if (node._notified == 0) {
3960 if (millis <= 0) {
3961 Self->_ParkEvent->park () ;
3962 } else {
3963 ret = Self->_ParkEvent->park (millis) ;
3964 }
3965 }
3966
3967 // were we externally suspended while we were waiting?
3968 if (ExitSuspendEquivalent (jt)) {
3969 // TODO-FIXME: add -- if succ == Self then succ = null.
3970 jt->java_suspend_self();
3971 }
3972
3973 } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
3974
3975
3976 // Node may be on the WaitSet, the EntryList (or cxq), or in transition
3977 // from the WaitSet to the EntryList.
3978 // See if we need to remove Node from the WaitSet.
3979 // We use double-checked locking to avoid grabbing _WaitSetLock
3980 // if the thread is not on the wait queue.
3981 //
3982 // Note that we don't need a fence before the fetch of TState.
3983       // In the worst case we'll fetch an old, stale value of TS_WAIT previously
3984       // written by this thread.  (perhaps the fetch might even be satisfied
3985 // by a look-aside into the processor's own store buffer, although given
3986 // the length of the code path between the prior ST and this load that's
3987 // highly unlikely). If the following LD fetches a stale TS_WAIT value
3988 // then we'll acquire the lock and then re-fetch a fresh TState value.
3989 // That is, we fail toward safety.
3990
3991 if (node.TState == ObjectWaiter::TS_WAIT) {
3992 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
3993 if (node.TState == ObjectWaiter::TS_WAIT) {
3994 DequeueSpecificWaiter (&node) ; // unlink from WaitSet
3995 assert(node._notified == 0, "invariant");
3996 node.TState = ObjectWaiter::TS_RUN ;
3997 }
3998 Thread::SpinRelease (&_WaitSetLock) ;
3999 }
4000
4001       // The thread is now either off-list (TS_RUN),
4002 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
4003 // The Node's TState variable is stable from the perspective of this thread.
4004 // No other threads will asynchronously modify TState.
4005 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
4006 OrderAccess::loadload() ;
4007 if (_succ == Self) _succ = NULL ;
4008 WasNotified = node._notified ;
4009
4010 // Reentry phase -- reacquire the monitor.
4011 // re-enter contended monitor after object.wait().
4012 // retain OBJECT_WAIT state until re-enter successfully completes
4013 // Thread state is thread_in_vm and oop access is again safe,
4014 // although the raw address of the object may have changed.
4015 // (Don't cache naked oops over safepoints, of course).
4016
4017 // post monitor waited event. Note that this is past-tense, we are done waiting.
4018 if (JvmtiExport::should_post_monitor_waited()) {
4019 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
4020 }
4021 OrderAccess::fence() ;
4022
4023 assert (Self->_Stalled != 0, "invariant") ;
4024 Self->_Stalled = 0 ;
4025
4026 assert (_owner != Self, "invariant") ;
4027 ObjectWaiter::TStates v = node.TState ;
4028 if (v == ObjectWaiter::TS_RUN) {
4029 enter (Self) ;
4030 } else {
4031 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
4032 ReenterI (Self, &node) ;
4033 node.wait_reenter_end(this);
4034 }
4035
4036 // Self has reacquired the lock.
4037 // Lifecycle - the node representing Self must not appear on any queues.
4038 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
4039 // want residual elements associated with this thread left on any lists.
4040 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4041 assert (_owner == Self, "invariant") ;
4042 assert (_succ != Self , "invariant") ;
4043 } // OSThreadWaitState()
4044
4045 jt->set_current_waiting_monitor(NULL);
4046
4047 guarantee (_recursions == 0, "invariant") ;
4048 _recursions = save; // restore the old recursion count
4049 _waiters--; // decrement the number of waiters
4050
4051 // Verify a few postconditions
4052 assert (_owner == Self , "invariant") ;
4053 assert (_succ != Self , "invariant") ;
4054 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
4055
4056 if (SyncFlags & 32) {
4057 OrderAccess::fence() ;
4058 }
4059
4060 // check if the notification happened
4061 if (!WasNotified) {
4062 // no, it could be timeout or Thread.interrupt() or both
4063 // check for interrupt event, otherwise it is timeout
4064 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
4065 TEVENT (Wait - throw IEX from epilog) ;
4066 THROW(vmSymbols::java_lang_InterruptedException());
4067 }
4068 }
4069
4070    // NOTE: A spurious wakeup will be treated as a timeout.
4071 // Monitor notify has precedence over thread interrupt.
4072 }
4073
4074
4075 // Consider:
4076 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
4077 // then instead of transferring a thread from the WaitSet to the EntryList
4078 // we might just dequeue a thread from the WaitSet and directly unpark() it.
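//
// A hedged sketch of that alternative (not what notify() below does); it assumes
// the caller holds the monitor and _WaitSetLock, exactly as notify() does:
//
//   if (os::is_MP() && _cxq == NULL && _succ == NULL) {
//     ObjectWaiter * w = DequeueWaiter () ;
//     if (w != NULL) {
//       w->TState = ObjectWaiter::TS_RUN ;
//       OrderAccess::fence() ;
//       w->_event->unpark() ;      // bypass the EntryList; the wakee re-enters on its own
//     }
//   }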
4079
4080 void ObjectMonitor::notify(TRAPS) {
4081 CHECK_OWNER();
4082 if (_WaitSet == NULL) {
4083 TEVENT (Empty-Notify) ;
4084 return ;
4085 }
4086 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
4087
4088 int Policy = Knob_MoveNotifyee ;
4089
4090 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
4091 ObjectWaiter * iterator = DequeueWaiter() ;
4092 if (iterator != NULL) {
4093 TEVENT (Notify1 - Transfer) ;
4094 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4095 guarantee (iterator->_notified == 0, "invariant") ;
4096 // Disposition - what might we do with iterator ?
4097 // a. add it directly to the EntryList - either tail or head.
4098 // b. push it onto the front of the _cxq.
4099 // For now we use (a).
4100 if (Policy != 4) {
4101 iterator->TState = ObjectWaiter::TS_ENTER ;
4102 }
4103 iterator->_notified = 1 ;
4104
4105 ObjectWaiter * List = _EntryList ;
4106 if (List != NULL) {
4107 assert (List->_prev == NULL, "invariant") ;
4108 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4109 assert (List != iterator, "invariant") ;
4110 }
4111
4112 if (Policy == 0) { // prepend to EntryList
4113 if (List == NULL) {
4114 iterator->_next = iterator->_prev = NULL ;
4115 _EntryList = iterator ;
4116 } else {
4117 List->_prev = iterator ;
4118 iterator->_next = List ;
4119 iterator->_prev = NULL ;
4120 _EntryList = iterator ;
4121 }
4122 } else
4123 if (Policy == 1) { // append to EntryList
4124 if (List == NULL) {
4125 iterator->_next = iterator->_prev = NULL ;
4126 _EntryList = iterator ;
4127 } else {
4128 // CONSIDER: finding the tail currently requires a linear-time walk of
4129 // the EntryList. We can make tail access constant-time by converting to
4130 // a CDLL instead of using our current DLL.
4131 ObjectWaiter * Tail ;
4132 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4133 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4134 Tail->_next = iterator ;
4135 iterator->_prev = Tail ;
4136 iterator->_next = NULL ;
4137 }
4138 } else
4139 if (Policy == 2) { // prepend to cxq
4140 // prepend to cxq
4141 if (List == NULL) {
4142 iterator->_next = iterator->_prev = NULL ;
4143 _EntryList = iterator ;
4144 } else {
4145 iterator->TState = ObjectWaiter::TS_CXQ ;
4146 for (;;) {
4147 ObjectWaiter * Front = _cxq ;
4148 iterator->_next = Front ;
4149 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4150 break ;
4151 }
4152 }
4153 }
4154 } else
4155 if (Policy == 3) { // append to cxq
4156 iterator->TState = ObjectWaiter::TS_CXQ ;
4157 for (;;) {
4158 ObjectWaiter * Tail ;
4159 Tail = _cxq ;
4160 if (Tail == NULL) {
4161 iterator->_next = NULL ;
4162 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4163 break ;
4164 }
4165 } else {
4166 while (Tail->_next != NULL) Tail = Tail->_next ;
4167 Tail->_next = iterator ;
4168 iterator->_prev = Tail ;
4169 iterator->_next = NULL ;
4170 break ;
4171 }
4172 }
4173 } else {
4174 ParkEvent * ev = iterator->_event ;
4175 iterator->TState = ObjectWaiter::TS_RUN ;
4176 OrderAccess::fence() ;
4177 ev->unpark() ;
4178 }
4179
4180 if (Policy < 4) {
4181 iterator->wait_reenter_begin(this);
4182 }
4183
4184 // _WaitSetLock protects the wait queue, not the EntryList. We could
4185 // move the add-to-EntryList operation, above, outside the critical section
4186 // protected by _WaitSetLock. In practice that's not useful. With the
4187 // exception of wait() timeouts and interrupts the monitor owner
4188 // is the only thread that grabs _WaitSetLock. There's almost no contention
4189 // on _WaitSetLock so it's not profitable to reduce the length of the
4190 // critical section.
4191 }
4192
4193 Thread::SpinRelease (&_WaitSetLock) ;
4194
4195 if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
4196 ObjectSynchronizer::_sync_Notifications->inc() ;
4197 }
4198 }
4199
4200
4201 void ObjectMonitor::notifyAll(TRAPS) {
4202 CHECK_OWNER();
4203 ObjectWaiter* iterator;
4204 if (_WaitSet == NULL) {
4205 TEVENT (Empty-NotifyAll) ;
4206 return ;
4207 }
4208 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
4209
4210 int Policy = Knob_MoveNotifyee ;
4211 int Tally = 0 ;
4212 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
4213
4214 for (;;) {
4215 iterator = DequeueWaiter () ;
4216 if (iterator == NULL) break ;
4217 TEVENT (NotifyAll - Transfer1) ;
4218 ++Tally ;
4219
4220 // Disposition - what might we do with iterator ?
4221 // a. add it directly to the EntryList - either tail or head.
4222 // b. push it onto the front of the _cxq.
4223 // For now we use (a).
4224 //
4225 // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
4226 // to the EntryList. This could be done more efficiently with a single bulk transfer,
4227 // but in practice it's not time-critical. Beware too, that in prepend-mode we invert the
4228     // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
4229 // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
4230 // be "DCBAXYZ".
4231
4232 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4233 guarantee (iterator->_notified == 0, "invariant") ;
4234 iterator->_notified = 1 ;
4235 if (Policy != 4) {
4236 iterator->TState = ObjectWaiter::TS_ENTER ;
4237 }
4238
4239 ObjectWaiter * List = _EntryList ;
4240 if (List != NULL) {
4241 assert (List->_prev == NULL, "invariant") ;
4242 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4243 assert (List != iterator, "invariant") ;
4244 }
4245
4246 if (Policy == 0) { // prepend to EntryList
4247 if (List == NULL) {
4248 iterator->_next = iterator->_prev = NULL ;
4249 _EntryList = iterator ;
4250 } else {
4251 List->_prev = iterator ;
4252 iterator->_next = List ;
4253 iterator->_prev = NULL ;
4254 _EntryList = iterator ;
4255 }
4256 } else
4257 if (Policy == 1) { // append to EntryList
4258 if (List == NULL) {
4259 iterator->_next = iterator->_prev = NULL ;
4260 _EntryList = iterator ;
4261 } else {
4262 // CONSIDER: finding the tail currently requires a linear-time walk of
4263 // the EntryList. We can make tail access constant-time by converting to
4264 // a CDLL instead of using our current DLL.
4265 ObjectWaiter * Tail ;
4266 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4267 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4268 Tail->_next = iterator ;
4269 iterator->_prev = Tail ;
4270 iterator->_next = NULL ;
4271 }
4272 } else
4273 if (Policy == 2) { // prepend to cxq
4274 // prepend to cxq
4275 iterator->TState = ObjectWaiter::TS_CXQ ;
4276 for (;;) {
4277 ObjectWaiter * Front = _cxq ;
4278 iterator->_next = Front ;
4279 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4280 break ;
4281 }
4282 }
4283 } else
4284 if (Policy == 3) { // append to cxq
4285 iterator->TState = ObjectWaiter::TS_CXQ ;
4286 for (;;) {
4287 ObjectWaiter * Tail ;
4288 Tail = _cxq ;
4289 if (Tail == NULL) {
4290 iterator->_next = NULL ;
4291 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4292 break ;
4293 }
4294 } else {
4295 while (Tail->_next != NULL) Tail = Tail->_next ;
4296 Tail->_next = iterator ;
4297 iterator->_prev = Tail ;
4298 iterator->_next = NULL ;
4299 break ;
4300 }
4301 }
4302 } else {
4303 ParkEvent * ev = iterator->_event ;
4304 iterator->TState = ObjectWaiter::TS_RUN ;
4305 OrderAccess::fence() ;
4306 ev->unpark() ;
4307 }
4308
4309 if (Policy < 4) {
4310 iterator->wait_reenter_begin(this);
4311 }
4312
4313 // _WaitSetLock protects the wait queue, not the EntryList. We could
4314 // move the add-to-EntryList operation, above, outside the critical section
4315 // protected by _WaitSetLock. In practice that's not useful. With the
4316 // exception of wait() timeouts and interrupts the monitor owner
4317 // is the only thread that grabs _WaitSetLock. There's almost no contention
4318 // on _WaitSetLock so it's not profitable to reduce the length of the
4319 // critical section.
4320 }
4321
4322 Thread::SpinRelease (&_WaitSetLock) ;
4323
4324 if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
4325 ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
4326 }
4327 }
4328
4329 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
4330 // TODO-FIXME: remove check_slow() -- it's likely dead.
4331
4332 void ObjectMonitor::check_slow(TRAPS) {
4333 TEVENT (check_slow - throw IMSX) ;
4334 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
4335 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
4336 }
4337
4338
4339 // -------------------------------------------------------------------------
4340 // The raw monitor subsystem is entirely distinct from normal
4341 // java-synchronization or jni-synchronization. raw monitors are not
4342 // associated with objects. They can be implemented in any manner
4343 // that makes sense. The original implementors decided to piggy-back
4344 // the raw-monitor implementation on the existing Java objectMonitor mechanism.
4345 // This flaw needs to be fixed.  We should reimplement raw monitors as a sui generis mechanism.
4346 // Specifically, we should not implement raw monitors via java monitors.
4347 // Time permitting, we should disentangle and deconvolve the two implementations
4348 // and move the resulting raw monitor implementation over to the JVMTI directories.
4349 // Ideally, the raw monitor implementation would be built on top of
4350 // park-unpark and nothing else.
4351 //
4352 // raw monitors are used mainly by JVMTI
4353 // The raw monitor implementation borrows the ObjectMonitor structure,
4354 // but the operators are degenerate and extremely simple.
4355 //
4356 // Mixed use of a single objectMonitor instance -- as both a raw monitor
4357 // and a normal java monitor -- is not permissible.
4358 //
4359 // Note that we use the single RawMonitor_lock to protect queue operations for
4360 // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
4361 // is deprecated and rare, this is not a concern.  The RawMonitor_lock cannot
4362 // be held indefinitely. The critical sections must be short and bounded.
4363 //
4364 // -------------------------------------------------------------------------
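//
// For context, a JVMTI agent reaches this code through the standard raw-monitor
// entry points -- a sketch; see the JVMTI specification for the exact signatures:
//
//   jrawMonitorID m ;
//   jvmti->CreateRawMonitor ("my-agent-lock", &m) ;
//   jvmti->RawMonitorEnter (m) ;
//   jvmti->RawMonitorWait (m, 0 /* millis; 0 == wait until notified */) ;
//   jvmti->RawMonitorNotify (m) ;
//   jvmti->RawMonitorExit (m) ;
//   jvmti->DestroyRawMonitor (m) ;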
4365
4366 int ObjectMonitor::SimpleEnter (Thread * Self) {
4367 for (;;) {
4368 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4369 return OS_OK ;
4370 }
4371
4372 ObjectWaiter Node (Self) ;
4373 Self->_ParkEvent->reset() ; // strictly optional
4374 Node.TState = ObjectWaiter::TS_ENTER ;
4375
4376 RawMonitor_lock->lock_without_safepoint_check() ;
4377 Node._next = _EntryList ;
4378 _EntryList = &Node ;
4379 OrderAccess::fence() ;
4380 if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4381 _EntryList = Node._next ;
4382 RawMonitor_lock->unlock() ;
4383 return OS_OK ;
4384 }
4385 RawMonitor_lock->unlock() ;
4386 while (Node.TState == ObjectWaiter::TS_ENTER) {
4387 Self->_ParkEvent->park() ;
4388 }
4389 }
4390 }
4391
4392 int ObjectMonitor::SimpleExit (Thread * Self) {
4393 guarantee (_owner == Self, "invariant") ;
4394 OrderAccess::release_store_ptr (&_owner, NULL) ;
4395 OrderAccess::fence() ;
4396 if (_EntryList == NULL) return OS_OK ;
4397 ObjectWaiter * w ;
4398
4399 RawMonitor_lock->lock_without_safepoint_check() ;
4400 w = _EntryList ;
4401 if (w != NULL) {
4402 _EntryList = w->_next ;
4403 }
4404 RawMonitor_lock->unlock() ;
4405 if (w != NULL) {
4406 guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4407 ParkEvent * ev = w->_event ;
4408 w->TState = ObjectWaiter::TS_RUN ;
4409 OrderAccess::fence() ;
4410 ev->unpark() ;
4411 }
4412 return OS_OK ;
4413 }
4414
4415 int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
4416 guarantee (_owner == Self , "invariant") ;
4417 guarantee (_recursions == 0, "invariant") ;
4418
4419 ObjectWaiter Node (Self) ;
4420 Node._notified = 0 ;
4421 Node.TState = ObjectWaiter::TS_WAIT ;
4422
4423 RawMonitor_lock->lock_without_safepoint_check() ;
4424 Node._next = _WaitSet ;
4425 _WaitSet = &Node ;
4426 RawMonitor_lock->unlock() ;
4427
4428 SimpleExit (Self) ;
4429 guarantee (_owner != Self, "invariant") ;
4430
4431 int ret = OS_OK ;
4432 if (millis <= 0) {
4433 Self->_ParkEvent->park();
4434 } else {
4435 ret = Self->_ParkEvent->park(millis);
4436 }
4437
4438 // If thread still resides on the waitset then unlink it.
4439 // Double-checked locking -- the usage is safe in this context
4440    // as TState is volatile and the lock-unlock operators are
4441 // serializing (barrier-equivalent).
4442
4443 if (Node.TState == ObjectWaiter::TS_WAIT) {
4444 RawMonitor_lock->lock_without_safepoint_check() ;
4445 if (Node.TState == ObjectWaiter::TS_WAIT) {
4446 // Simple O(n) unlink, but performance isn't critical here.
4447 ObjectWaiter * p ;
4448 ObjectWaiter * q = NULL ;
4449 for (p = _WaitSet ; p != &Node; p = p->_next) {
4450 q = p ;
4451 }
4452 guarantee (p == &Node, "invariant") ;
4453 if (q == NULL) {
4454 guarantee (p == _WaitSet, "invariant") ;
4455 _WaitSet = p->_next ;
4456 } else {
4457 guarantee (p == q->_next, "invariant") ;
4458 q->_next = p->_next ;
4459 }
4460 Node.TState = ObjectWaiter::TS_RUN ;
4461 }
4462 RawMonitor_lock->unlock() ;
4463 }
4464
4465 guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4466 SimpleEnter (Self) ;
4467
4468 guarantee (_owner == Self, "invariant") ;
4469 guarantee (_recursions == 0, "invariant") ;
4470 return ret ;
4471 }
4472
4473 int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
4474 guarantee (_owner == Self, "invariant") ;
4475 if (_WaitSet == NULL) return OS_OK ;
4476
4477 // We have two options:
4478 // A. Transfer the threads from the WaitSet to the EntryList
4479 // B. Remove the thread from the WaitSet and unpark() it.
4480 //
4481 // We use (B), which is crude and results in lots of futile
4482 // context switching. In particular (B) induces lots of contention.
4483
4484 ParkEvent * ev = NULL ; // consider using a small auto array ...
4485 RawMonitor_lock->lock_without_safepoint_check() ;
4486 for (;;) {
4487 ObjectWaiter * w = _WaitSet ;
4488 if (w == NULL) break ;
4489 _WaitSet = w->_next ;
4490 if (ev != NULL) { ev->unpark(); ev = NULL; }
4491 ev = w->_event ;
4492 OrderAccess::loadstore() ;
4493 w->TState = ObjectWaiter::TS_RUN ;
4494 OrderAccess::storeload();
4495 if (!All) break ;
4496 }
4497 RawMonitor_lock->unlock() ;
4498 if (ev != NULL) ev->unpark();
4499 return OS_OK ;
4500 }
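
// [Editor's illustrative sketch -- not part of the original changeset.]
// Under option (A), the loop above would transfer each waiter to the EntryList
// instead of unparking it; SimpleExit() would then wake the thread only when
// the monitor is actually available.  A complete version would also require
// SimpleWait() to cope with a node that has been moved to the EntryList
// (park until TS_RUN and skip the subsequent re-enter), which is elided here.
// Roughly:
//
//      ObjectWaiter * w = _WaitSet ;
//      if (w == NULL) break ;
//      _WaitSet   = w->_next ;
//      w->TState  = ObjectWaiter::TS_ENTER ;   // queued to acquire, no longer waiting
//      w->_next   = _EntryList ;               // push onto the EntryList; no unpark()
//      _EntryList = w ;
//      if (!All) break ;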
4501
4502 // Any JavaThread will enter here with state _thread_blocked.  (A sketch of a typical caller follows this function.)
4503 int ObjectMonitor::raw_enter(TRAPS) {
4504 TEVENT (raw_enter) ;
4505 void * Contended ;
4506
4507   // Don't enter the raw monitor if the thread is being externally suspended; it
4508   // would surprise the suspender if a "suspended" thread could still enter a monitor.
4509 JavaThread * jt = (JavaThread *)THREAD;
4510 if (THREAD->is_Java_thread()) {
4511 jt->SR_lock()->lock_without_safepoint_check();
4512 while (jt->is_external_suspend()) {
4513 jt->SR_lock()->unlock();
4514 jt->java_suspend_self();
4515 jt->SR_lock()->lock_without_safepoint_check();
4516 }
4517 // guarded by SR_lock to avoid racing with new external suspend requests.
4518 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4519 jt->SR_lock()->unlock();
4520 } else {
4521 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4522 }
4523
4524 if (Contended == THREAD) {
4525 _recursions ++ ;
4526 return OM_OK ;
4527 }
4528
4529 if (Contended == NULL) {
4530 guarantee (_owner == THREAD, "invariant") ;
4531 guarantee (_recursions == 0, "invariant") ;
4532 return OM_OK ;
4533 }
4534
4535 THREAD->set_current_pending_monitor(this);
4536
4537 if (!THREAD->is_Java_thread()) {
4538     // No non-Java thread other than the VM thread will acquire
4539     // a raw monitor.
4540 assert(THREAD->is_VM_thread(), "must be VM thread");
4541 SimpleEnter (THREAD) ;
4542 } else {
4543 guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
4544 for (;;) {
4545 jt->set_suspend_equivalent();
4546 // cleared by handle_special_suspend_equivalent_condition() or
4547 // java_suspend_self()
4548 SimpleEnter (THREAD) ;
4549
4550 // were we externally suspended while we were waiting?
4551 if (!jt->handle_special_suspend_equivalent_condition()) break ;
4552
4553 // This thread was externally suspended
4554 //
4555 // This logic isn't needed for JVMTI raw monitors,
4556 // but doesn't hurt just in case the suspend rules change. This
4557 // logic is needed for the ObjectMonitor.wait() reentry phase.
4558 // We have reentered the contended monitor, but while we were
4559 // waiting another thread suspended us. We don't want to reenter
4560 // the monitor while suspended because that would surprise the
4561 // thread that suspended us.
4562 //
4563 // Drop the lock -
4564 SimpleExit (THREAD) ;
4565
4566 jt->java_suspend_self();
4567 }
4568
4569 assert(_owner == THREAD, "Fatal error with monitor owner!");
4570 assert(_recursions == 0, "Fatal error with monitor recursions!");
4571 }
4572
4573 THREAD->set_current_pending_monitor(NULL);
4574 guarantee (_recursions == 0, "invariant") ;
4575 return OM_OK;
4576 }
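
// [Editor's illustrative sketch -- not part of the original changeset.]
// A JavaThread must be in _thread_blocked before calling raw_enter(), as the
// guarantee above enforces.  A hypothetical caller could use the ThreadBlockInVM
// transition helper for that; the function and variable names below are invented
// for the example.
static void example_raw_lock (JavaThread * jt, ObjectMonitor * rmon) {
  ThreadBlockInVM tbivm(jt) ;    // transition _thread_in_vm -> _thread_blocked
  rmon->raw_enter(jt) ;          // may block; external suspension is handled inside
}                                // tbivm's destructor restores _thread_in_vm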
4577
4578 // Used mainly for JVMTI raw monitor implementation
4579 // Also used for ObjectMonitor::wait().
4580 int ObjectMonitor::raw_exit(TRAPS) {
4581 TEVENT (raw_exit) ;
4582 if (THREAD != _owner) {
4583 return OM_ILLEGAL_MONITOR_STATE;
4584 }
4585 if (_recursions > 0) {
4586 --_recursions ;
4587 return OM_OK ;
4588 }
4589
4590 void * List = _EntryList ;
4591 SimpleExit (THREAD) ;
4592
4593 return OM_OK;
4594 }
4595
4596 // Used for JVMTI raw monitor implementation.
4597 // All JavaThreads will enter here with state _thread_blocked
4598
4599 int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
4600 TEVENT (raw_wait) ;
4601 if (THREAD != _owner) {
4602 return OM_ILLEGAL_MONITOR_STATE;
4603 }
4604
4605   // To avoid spurious wakeups we reset the ParkEvent -- this is strictly optional.
4606   // The caller must be able to tolerate spurious returns from raw_wait(); a sketch of such a caller follows this function.
4607 THREAD->_ParkEvent->reset() ;
4608 OrderAccess::fence() ;
4609
4610 // check interrupt event
4611 if (interruptible && Thread::is_interrupted(THREAD, true)) {
4612 return OM_INTERRUPTED;
4613 }
4614
4615 intptr_t save = _recursions ;
4616 _recursions = 0 ;
4617 _waiters ++ ;
4618 if (THREAD->is_Java_thread()) {
4619 guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
4620 ((JavaThread *)THREAD)->set_suspend_equivalent();
4621 }
4622 int rv = SimpleWait (THREAD, millis) ;
4623 _recursions = save ;
4624 _waiters -- ;
4625
4626 guarantee (THREAD == _owner, "invariant") ;
4627 if (THREAD->is_Java_thread()) {
4628 JavaThread * jSelf = (JavaThread *) THREAD ;
4629 for (;;) {
4630 if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
4631 SimpleExit (THREAD) ;
4632 jSelf->java_suspend_self();
4633 SimpleEnter (THREAD) ;
4634 jSelf->set_suspend_equivalent() ;
4635 }
4636 }
4637 guarantee (THREAD == _owner, "invariant") ;
4638
4639 if (interruptible && Thread::is_interrupted(THREAD, true)) {
4640 return OM_INTERRUPTED;
4641 }
4642 return OM_OK ;
4643 }
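
// [Editor's illustrative sketch -- not part of the original changeset.]
// Because raw_wait() may return spuriously, a caller should re-test its own
// condition in a loop.  The flag and helper below are invented for the example;
// another thread would set the flag while owning the monitor and then call
// raw_notify().
static void example_wait_for_flag (ObjectMonitor * rmon, volatile bool * flag, Thread * Self) {
  // Precondition: Self already owns rmon (e.g. via raw_enter) and, if it is a
  // JavaThread, is in _thread_blocked as raw_wait() requires.
  while (!*flag) {
    // A spurious return simply re-tests the predicate and waits again.
    rmon->raw_wait (0 /* wait indefinitely */, false /* not interruptible */, Self) ;
  }
}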
4644
4645 int ObjectMonitor::raw_notify(TRAPS) {
4646 TEVENT (raw_notify) ;
4647 if (THREAD != _owner) {
4648 return OM_ILLEGAL_MONITOR_STATE;
4649 }
4650 SimpleNotify (THREAD, false) ;
4651 return OM_OK;
4652 }
4653
4654 int ObjectMonitor::raw_notifyAll(TRAPS) {
4655 TEVENT (raw_notifyAll) ;
4656 if (THREAD != _owner) {
4657 return OM_ILLEGAL_MONITOR_STATE;
4658 }
4659 SimpleNotify (THREAD, true) ;
4660 return OM_OK;
4661 }
4662
4663 #ifndef PRODUCT
4664 void ObjectMonitor::verify() {
4665 }
4666
4667 void ObjectMonitor::print() {
4668 }
4669 #endif
4670 1581
4671 //------------------------------------------------------------------------------ 1582 //------------------------------------------------------------------------------
4672 // Non-product code 1583 // Non-product code
4673 1584
4674 #ifndef PRODUCT 1585 #ifndef PRODUCT