Mercurial > hg > graal-compiler
annotate src/share/vm/runtime/synchronizer.cpp @ 886:061cd4d965fc
6862534: -XX:NewRatio completely ignored when combined with -XX:+UseConcMarkSweepG
Summary: Use NewRatio if it is explicitly set.
Reviewed-by: ysr, jcoomes
author | jmasa |
---|---|
date | Sun, 02 Aug 2009 18:44:36 -0700 |
parents | b9fba36710f2 |
children | b30a2cd5e3a2 c18cbe5936b8 b96a3e44582f |
rev | line source |
---|---|
0 | 1 /* |
579 | 2 * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_synchronizer.cpp.incl" | |
27 | |
28 #if defined(__GNUC__) && !defined(IA64) | |
29 // Need to inhibit inlining for older versions of GCC to avoid build-time failures | |
30 #define ATTR __attribute__((noinline)) | |
31 #else | |
32 #define ATTR | |
33 #endif | |
34 | |
35 // Native markword accessors for synchronization and hashCode(). | |
36 // | |
37 // The "core" versions of monitor enter and exit reside in this file. | |
38 // The interpreter and compilers contain specialized transliterated | |
39 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), | |
40 // for instance. If you make changes here, make sure to modify the | |
41 // interpreter, and both C1 and C2 fast-path inline locking code emission. | |
42 // | |
43 // TODO: merge the objectMonitor and synchronizer classes. | |
44 // | |
45 // ----------------------------------------------------------------------------- | |
46 | |
47 #ifdef DTRACE_ENABLED | |
48 | |
49 // Only bother with this argument setup if dtrace is available | |
50 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. | |
51 | |
52 HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait, | |
53 jlong, uintptr_t, char*, int, long); | |
54 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited, | |
55 jlong, uintptr_t, char*, int); | |
56 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify, | |
57 jlong, uintptr_t, char*, int); | |
58 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll, | |
59 jlong, uintptr_t, char*, int); | |
60 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter, | |
61 jlong, uintptr_t, char*, int); | |
62 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered, | |
63 jlong, uintptr_t, char*, int); | |
64 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit, | |
65 jlong, uintptr_t, char*, int); | |
66 | |
67 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \ | |
68 char* bytes = NULL; \ | |
69 int len = 0; \ | |
70 jlong jtid = SharedRuntime::get_java_tid(thread); \ | |
71 symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name(); \ | |
72 if (klassname != NULL) { \ | |
73 bytes = (char*)klassname->bytes(); \ | |
74 len = klassname->utf8_length(); \ | |
75 } | |
76 | |
77 #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) \ | |
78 { \ | |
79 if (DTraceMonitorProbes) { \ | |
80 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \ | |
81 HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \ | |
82 (monitor), bytes, len, (millis)); \ | |
83 } \ | |
84 } | |
85 | |
86 #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) \ | |
87 { \ | |
88 if (DTraceMonitorProbes) { \ | |
89 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \ | |
90 HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \ | |
91 (uintptr_t)(monitor), bytes, len); \ | |
92 } \ | |
93 } | |
94 | |
95 #else // ndef DTRACE_ENABLED | |
96 | |
97 #define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon) {;} | |
98 #define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon) {;} | |
99 | |
100 #endif // ndef DTRACE_ENABLED | |
101 | |
102 // ObjectWaiter serves as a "proxy" or surrogate thread. | |
103 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific | |
104 // ParkEvent instead. Beware, however, that the JVMTI code | |
105 // knows about ObjectWaiters, so we'll have to reconcile that code. | |
106 // See next_waiter(), first_waiter(), etc. | |
107 | |
108 class ObjectWaiter : public StackObj { | |
109 public: | |
110 enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ; | |
111 enum Sorted { PREPEND, APPEND, SORTED } ; | |
112 ObjectWaiter * volatile _next; | |
113 ObjectWaiter * volatile _prev; | |
114 Thread* _thread; | |
115 ParkEvent * _event; | |
116 volatile int _notified ; | |
117 volatile TStates TState ; | |
118 Sorted _Sorted ; // List placement disposition | |
119 bool _active ; // Contention monitoring is enabled | |
120 public: | |
121 ObjectWaiter(Thread* thread) { | |
122 _next = NULL; | |
123 _prev = NULL; | |
124 _notified = 0; | |
125 TState = TS_RUN ; | |
126 _thread = thread; | |
127 _event = thread->_ParkEvent ; | |
128 _active = false; | |
129 assert (_event != NULL, "invariant") ; | |
130 } | |
131 | |
132 void wait_reenter_begin(ObjectMonitor *mon) { | |
133 JavaThread *jt = (JavaThread *)this->_thread; | |
134 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon); | |
135 } | |
136 | |
137 void wait_reenter_end(ObjectMonitor *mon) { | |
138 JavaThread *jt = (JavaThread *)this->_thread; | |
139 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active); | |
140 } | |
141 }; | |
142 | |
143 enum ManifestConstants { | |
144 ClearResponsibleAtSTW = 0, | |
145 MaximumRecheckInterval = 1000 | |
146 } ; | |
147 | |
148 | |
149 #undef TEVENT | |
150 #define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); } | |
151 | |
152 #define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }} | |
153 | |
154 #undef TEVENT | |
155 #define TEVENT(nom) {;} | |
156 | |
157 // Performance concern: | |
158 // OrderAccess::storestore() calls release() which STs 0 into the global volatile | |
159 // OrderAccess::Dummy variable. This store is unnecessary for correctness. | |
160 // Many threads STing into a common location causes considerable cache migration | |
161 // or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore() | |
162 // until it's repaired. In some cases OrderAccess::fence() -- which incurs local | |
163 // latency on the executing processor -- is a better choice as it scales on SMP | |
164 // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a | |
165 // discussion of coherency costs. Note that all our current reference platforms | |
166 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC. | |
167 // | |
168 // As a general policy we use "volatile" to control compiler-based reordering | |
169 // and explicit fences (barriers) to control for architectural reordering performed | |
170 // by the CPU(s) or platform. | |
171 | |
172 static int MBFence (int x) { OrderAccess::fence(); return x; } | |
173 | |
174 struct SharedGlobals { | |
175 // These are highly shared mostly-read variables. | |
176 // To avoid false-sharing they need to be the sole occupants of a $ line. | |
177 double padPrefix [8]; | |
178 volatile int stwRandom ; | |
179 volatile int stwCycle ; | |
180 | |
181 // Hot RW variables -- Sequester to avoid false-sharing | |
182 double padSuffix [16]; | |
183 volatile int hcSequence ; | |
184 double padFinal [8] ; | |
185 } ; | |
186 | |
187 static SharedGlobals GVars ; | |
188 | |
189 | |
190 // Tunables ... | |
191 // The knob* variables are effectively final. Once set they should | |
192 // never be modified hence. Consider using __read_mostly with GCC. | |
193 | |
194 static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins | |
195 static int Knob_HandOff = 0 ; | |
196 static int Knob_Verbose = 0 ; | |
197 static int Knob_ReportSettings = 0 ; | |
198 | |
199 static int Knob_SpinLimit = 5000 ; // derived by an external tool - | |
200 static int Knob_SpinBase = 0 ; // Floor AKA SpinMin | |
201 static int Knob_SpinBackOff = 0 ; // spin-loop backoff | |
202 static int Knob_CASPenalty = -1 ; // Penalty for failed CAS | |
203 static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change | |
204 static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field | |
205 static int Knob_SpinEarly = 1 ; | |
206 static int Knob_SuccEnabled = 1 ; // futile wake throttling | |
207 static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one | |
208 static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs | |
209 static int Knob_Bonus = 100 ; // spin success bonus | |
210 static int Knob_BonusB = 100 ; // spin success bonus | |
211 static int Knob_Penalty = 200 ; // spin failure penalty | |
212 static int Knob_Poverty = 1000 ; | |
213 static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park() | |
214 static int Knob_FixedSpin = 0 ; | |
215 static int Knob_OState = 3 ; // Spinner checks thread state of _owner | |
216 static int Knob_UsePause = 1 ; | |
217 static int Knob_ExitPolicy = 0 ; | |
218 static int Knob_PreSpin = 10 ; // 20-100 likely better | |
219 static int Knob_ResetEvent = 0 ; | |
220 static int BackOffMask = 0 ; | |
221 | |
222 static int Knob_FastHSSEC = 0 ; | |
223 static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee | |
224 static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline | |
225 static volatile int InitDone = 0 ; | |
226 | |
227 | |
228 // hashCode() generation : | |
229 // | |
230 // Possibilities: | |
231 // * MD5Digest of {obj,stwRandom} | |
232 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function. | |
233 // * A DES- or AES-style SBox[] mechanism | |
234 // * One of the Phi-based schemes, such as: | |
235 // 2654435761 = 2^32 * Phi (golden ratio) | |
236 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; | |
237 // * A variation of Marsaglia's shift-xor RNG scheme. | |
238 // * (obj ^ stwRandom) is appealing, but can result | |
239 // in undesirable regularity in the hashCode values of adjacent objects | |
240 // (objects allocated back-to-back, in particular). This could potentially | |
241 // result in hashtable collisions and reduced hashtable efficiency. | |
242 // There are simple ways to "diffuse" the middle address bits over the | |
243 // generated hashCode values: | |
244 // | |
245 | |
246 static inline intptr_t get_next_hash(Thread * Self, oop obj) { | |
247 intptr_t value = 0 ; | |
248 if (hashCode == 0) { | |
249 // This form uses an unguarded global Park-Miller RNG, | |
250 // so it's possible for two threads to race and generate the same RNG. | |
251 // On MP system we'll have lots of RW access to a global, so the | |
252 // mechanism induces lots of coherency traffic. | |
253 value = os::random() ; | |
254 } else | |
255 if (hashCode == 1) { | |
256 // This variation has the property of being stable (idempotent) | |
257 // between STW operations. This can be useful in some of the 1-0 | |
258 // synchronization schemes. | |
259 intptr_t addrBits = intptr_t(obj) >> 3 ; | |
260 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ; | |
261 } else | |
262 if (hashCode == 2) { | |
263 value = 1 ; // for sensitivity testing | |
264 } else | |
265 if (hashCode == 3) { | |
266 value = ++GVars.hcSequence ; | |
267 } else | |
268 if (hashCode == 4) { | |
269 value = intptr_t(obj) ; | |
270 } else { | |
271 // Marsaglia's xor-shift scheme with thread-specific state | |
272 // This is probably the best overall implementation -- we'll | |
273 // likely make this the default in future releases. | |
274 unsigned t = Self->_hashStateX ; | |
275 t ^= (t << 11) ; | |
276 Self->_hashStateX = Self->_hashStateY ; | |
277 Self->_hashStateY = Self->_hashStateZ ; | |
278 Self->_hashStateZ = Self->_hashStateW ; | |
279 unsigned v = Self->_hashStateW ; | |
280 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ; | |
281 Self->_hashStateW = v ; | |
282 value = v ; | |
283 } | |
284 | |
285 value &= markOopDesc::hash_mask; | |
286 if (value == 0) value = 0xBAD ; | |
287 assert (value != markOopDesc::no_hash, "invariant") ; | |
288 TEVENT (hashCode: GENERATE) ; | |
289 return value; | |
290 } | |
291 | |
292 void BasicLock::print_on(outputStream* st) const { | |
293 st->print("monitor"); | |
294 } | |
295 | |
296 void BasicLock::move_to(oop obj, BasicLock* dest) { | |
297 // Check to see if we need to inflate the lock. This is only needed | |
298 // if an object is locked using "this" lightweight monitor. In that | |
299 // case, the displaced_header() is unlocked, because the | |
300 // displaced_header() contains the header for the originally unlocked | |
301 // object. However the object could have already been inflated. But it | |
302 // does not matter, the inflation will just a no-op. For other cases, | |
303 // the displaced header will be either 0x0 or 0x3, which are location | |
304 // independent, therefore the BasicLock is free to move. | |
305 // | |
306 // During OSR we may need to relocate a BasicLock (which contains a | |
307 // displaced word) from a location in an interpreter frame to a | |
308 // new location in a compiled frame. "this" refers to the source | |
309 // basiclock in the interpreter frame. "dest" refers to the destination | |
310 // basiclock in the new compiled frame. We *always* inflate in move_to(). | |
311 // The always-Inflate policy works properly, but in 1.5.0 it can sometimes | |
312 // cause performance problems in code that makes heavy use of a small # of | |
313 // uncontended locks. (We'd inflate during OSR, and then sync performance | |
314 // would subsequently plummet because the thread would be forced thru the slow-path). | |
315 // This problem has been made largely moot on IA32 by inlining the inflated fast-path | |
316 // operations in Fast_Lock and Fast_Unlock in i486.ad. | |
317 // | |
318 // Note that there is a way to safely swing the object's markword from | |
319 // one stack location to another. This avoids inflation. Obviously, | |
320 // we need to ensure that both locations refer to the current thread's stack. | |
321 // There are some subtle concurrency issues, however, and since the benefit is | |
322 // is small (given the support for inflated fast-path locking in the fast_lock, etc) | |
323 // we'll leave that optimization for another time. | |
324 | |
325 if (displaced_header()->is_neutral()) { | |
326 ObjectSynchronizer::inflate_helper(obj); | |
327 // WARNING: We can not put check here, because the inflation | |
328 // will not update the displaced header. Once BasicLock is inflated, | |
329 // no one should ever look at its content. | |
330 } else { | |
331 // Typically the displaced header will be 0 (recursive stack lock) or | |
332 // unused_mark. Naively we'd like to assert that the displaced mark | |
333 // value is either 0, neutral, or 3. But with the advent of the | |
334 // store-before-CAS avoidance in fast_lock/compiler_lock_object | |
335 // we can find any flavor mark in the displaced mark. | |
336 } | |
337 // [RGV] The next line appears to do nothing! | |
338 intptr_t dh = (intptr_t) displaced_header(); | |
339 dest->set_displaced_header(displaced_header()); | |
340 } | |
341 | |
342 // ----------------------------------------------------------------------------- | |
343 | |
344 // standard constructor, allows locking failures | |
345 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) { | |
346 _dolock = doLock; | |
347 _thread = thread; | |
348 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);) | |
349 _obj = obj; | |
350 | |
351 if (_dolock) { | |
352 TEVENT (ObjectLocker) ; | |
353 | |
354 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread); | |
355 } | |
356 } | |
357 | |
358 ObjectLocker::~ObjectLocker() { | |
359 if (_dolock) { | |
360 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread); | |
361 } | |
362 } | |
363 | |
364 // ----------------------------------------------------------------------------- | |
365 | |
366 | |
367 PerfCounter * ObjectSynchronizer::_sync_Inflations = NULL ; | |
368 PerfCounter * ObjectSynchronizer::_sync_Deflations = NULL ; | |
369 PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ; | |
370 PerfCounter * ObjectSynchronizer::_sync_FutileWakeups = NULL ; | |
371 PerfCounter * ObjectSynchronizer::_sync_Parks = NULL ; | |
372 PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications = NULL ; | |
373 PerfCounter * ObjectSynchronizer::_sync_Notifications = NULL ; | |
374 PerfCounter * ObjectSynchronizer::_sync_PrivateA = NULL ; | |
375 PerfCounter * ObjectSynchronizer::_sync_PrivateB = NULL ; | |
376 PerfCounter * ObjectSynchronizer::_sync_SlowExit = NULL ; | |
377 PerfCounter * ObjectSynchronizer::_sync_SlowEnter = NULL ; | |
378 PerfCounter * ObjectSynchronizer::_sync_SlowNotify = NULL ; | |
379 PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll = NULL ; | |
380 PerfCounter * ObjectSynchronizer::_sync_FailedSpins = NULL ; | |
381 PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins = NULL ; | |
382 PerfCounter * ObjectSynchronizer::_sync_MonInCirculation = NULL ; | |
383 PerfCounter * ObjectSynchronizer::_sync_MonScavenged = NULL ; | |
384 PerfLongVariable * ObjectSynchronizer::_sync_MonExtant = NULL ; | |
385 | |
386 // One-shot global initialization for the sync subsystem. | |
387 // We could also defer initialization and initialize on-demand | |
388 // the first time we call inflate(). Initialization would | |
389 // be protected - like so many things - by the MonitorCache_lock. | |
390 | |
391 void ObjectSynchronizer::Initialize () { | |
392 static int InitializationCompleted = 0 ; | |
393 assert (InitializationCompleted == 0, "invariant") ; | |
394 InitializationCompleted = 1 ; | |
395 if (UsePerfData) { | |
396 EXCEPTION_MARK ; | |
397 #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); } | |
398 #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); } | |
399 NEWPERFCOUNTER(_sync_Inflations) ; | |
400 NEWPERFCOUNTER(_sync_Deflations) ; | |
401 NEWPERFCOUNTER(_sync_ContendedLockAttempts) ; | |
402 NEWPERFCOUNTER(_sync_FutileWakeups) ; | |
403 NEWPERFCOUNTER(_sync_Parks) ; | |
404 NEWPERFCOUNTER(_sync_EmptyNotifications) ; | |
405 NEWPERFCOUNTER(_sync_Notifications) ; | |
406 NEWPERFCOUNTER(_sync_SlowEnter) ; | |
407 NEWPERFCOUNTER(_sync_SlowExit) ; | |
408 NEWPERFCOUNTER(_sync_SlowNotify) ; | |
409 NEWPERFCOUNTER(_sync_SlowNotifyAll) ; | |
410 NEWPERFCOUNTER(_sync_FailedSpins) ; | |
411 NEWPERFCOUNTER(_sync_SuccessfulSpins) ; | |
412 NEWPERFCOUNTER(_sync_PrivateA) ; | |
413 NEWPERFCOUNTER(_sync_PrivateB) ; | |
414 NEWPERFCOUNTER(_sync_MonInCirculation) ; | |
415 NEWPERFCOUNTER(_sync_MonScavenged) ; | |
416 NEWPERFVARIABLE(_sync_MonExtant) ; | |
417 #undef NEWPERFCOUNTER | |
418 } | |
419 } | |
420 | |
421 // Compile-time asserts | |
422 // When possible, it's better to catch errors deterministically at | |
423 // compile-time than at runtime. The down-side to using compile-time | |
424 // asserts is that error message -- often something about negative array | |
425 // indices -- is opaque. | |
426 | |
513
2328d1d3f8cf
6781583: Hotspot build fails on linux 64 bit platform with gcc 4.3.2
xlu
parents:
478
diff
changeset
|
427 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); } |
0 | 428 |
429 void ObjectMonitor::ctAsserts() { | |
430 CTASSERT(offset_of (ObjectMonitor, _header) == 0); | |
431 } | |
432 | |
433 static int Adjust (volatile int * adr, int dx) { | |
434 int v ; | |
435 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; | |
436 return v ; | |
437 } | |
438 | |
439 // Ad-hoc mutual exclusion primitives: SpinLock and Mux | |
440 // | |
441 // We employ SpinLocks _only for low-contention, fixed-length | |
442 // short-duration critical sections where we're concerned | |
443 // about native mutex_t or HotSpot Mutex:: latency. | |
444 // The mux construct provides a spin-then-block mutual exclusion | |
445 // mechanism. | |
446 // | |
447 // Testing has shown that contention on the ListLock guarding gFreeList | |
448 // is common. If we implement ListLock as a simple SpinLock it's common | |
449 // for the JVM to devolve to yielding with little progress. This is true | |
450 // despite the fact that the critical sections protected by ListLock are | |
451 // extremely short. | |
452 // | |
453 // TODO-FIXME: ListLock should be of type SpinLock. | |
454 // We should make this a 1st-class type, integrated into the lock | |
455 // hierarchy as leaf-locks. Critically, the SpinLock structure | |
456 // should have sufficient padding to avoid false-sharing and excessive | |
457 // cache-coherency traffic. | |
458 | |
459 | |
460 typedef volatile int SpinLockT ; | |
461 | |
462 void Thread::SpinAcquire (volatile int * adr, const char * LockName) { | |
463 if (Atomic::cmpxchg (1, adr, 0) == 0) { | |
464 return ; // normal fast-path return | |
465 } | |
466 | |
467 // Slow-path : We've encountered contention -- Spin/Yield/Block strategy. | |
468 TEVENT (SpinAcquire - ctx) ; | |
469 int ctr = 0 ; | |
470 int Yields = 0 ; | |
471 for (;;) { | |
472 while (*adr != 0) { | |
473 ++ctr ; | |
474 if ((ctr & 0xFFF) == 0 || !os::is_MP()) { | |
475 if (Yields > 5) { | |
476 // Consider using a simple NakedSleep() instead. | |
477 // Then SpinAcquire could be called by non-JVM threads | |
478 Thread::current()->_ParkEvent->park(1) ; | |
479 } else { | |
480 os::NakedYield() ; | |
481 ++Yields ; | |
482 } | |
483 } else { | |
484 SpinPause() ; | |
485 } | |
486 } | |
487 if (Atomic::cmpxchg (1, adr, 0) == 0) return ; | |
488 } | |
489 } | |
490 | |
491 void Thread::SpinRelease (volatile int * adr) { | |
492 assert (*adr != 0, "invariant") ; | |
493 OrderAccess::fence() ; // guarantee at least release consistency. | |
494 // Roach-motel semantics. | |
495 // It's safe if subsequent LDs and STs float "up" into the critical section, | |
496 // but prior LDs and STs within the critical section can't be allowed | |
497 // to reorder or float past the ST that releases the lock. | |
498 *adr = 0 ; | |
499 } | |
500 | |
501 // muxAcquire and muxRelease: | |
502 // | |
503 // * muxAcquire and muxRelease support a single-word lock-word construct. | |
504 // The LSB of the word is set IFF the lock is held. | |
505 // The remainder of the word points to the head of a singly-linked list | |
506 // of threads blocked on the lock. | |
507 // | |
508 // * The current implementation of muxAcquire-muxRelease uses its own | |
509 // dedicated Thread._MuxEvent instance. If we're interested in | |
510 // minimizing the peak number of extant ParkEvent instances then | |
511 // we could eliminate _MuxEvent and "borrow" _ParkEvent as long | |
512 // as certain invariants were satisfied. Specifically, care would need | |
513 // to be taken with regards to consuming unpark() "permits". | |
514 // A safe rule of thumb is that a thread would never call muxAcquire() | |
515 // if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently | |
516 // park(). Otherwise the _ParkEvent park() operation in muxAcquire() could | |
517 // consume an unpark() permit intended for monitorenter, for instance. | |
518 // One way around this would be to widen the restricted-range semaphore | |
519 // implemented in park(). Another alternative would be to provide | |
520 // multiple instances of the PlatformEvent() for each thread. One | |
521 // instance would be dedicated to muxAcquire-muxRelease, for instance. | |
522 // | |
523 // * Usage: | |
524 // -- Only as leaf locks | |
525 // -- for short-term locking only as muxAcquire does not perform | |
526 // thread state transitions. | |
527 // | |
528 // Alternatives: | |
529 // * We could implement muxAcquire and muxRelease with MCS or CLH locks | |
530 // but with parking or spin-then-park instead of pure spinning. | |
531 // * Use Taura-Oyama-Yonenzawa locks. | |
532 // * It's possible to construct a 1-0 lock if we encode the lockword as | |
533 // (List,LockByte). Acquire will CAS the full lockword while Release | |
534 // will STB 0 into the LockByte. The 1-0 scheme admits stranding, so | |
535 // acquiring threads use timers (ParkTimed) to detect and recover from | |
536 // the stranding window. Thread/Node structures must be aligned on 256-byte | |
537 // boundaries by using placement-new. | |
538 // * Augment MCS with advisory back-link fields maintained with CAS(). | |
539 // Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner. | |
540 // The validity of the backlinks must be ratified before we trust the value. | |
541 // If the backlinks are invalid the exiting thread must back-track through the | |
542 // the forward links, which are always trustworthy. | |
543 // * Add a successor indication. The LockWord is currently encoded as | |
544 // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable | |
545 // to provide the usual futile-wakeup optimization. | |
546 // See RTStt for details. | |
547 // * Consider schedctl.sc_nopreempt to cover the critical section. | |
548 // | |
549 | |
550 | |
551 typedef volatile intptr_t MutexT ; // Mux Lock-word | |
552 enum MuxBits { LOCKBIT = 1 } ; | |
553 | |
554 void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) { | |
555 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ; | |
556 if (w == 0) return ; | |
557 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
558 return ; | |
559 } | |
560 | |
561 TEVENT (muxAcquire - Contention) ; | |
562 ParkEvent * const Self = Thread::current()->_MuxEvent ; | |
563 assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ; | |
564 for (;;) { | |
565 int its = (os::is_MP() ? 100 : 0) + 1 ; | |
566 | |
567 // Optional spin phase: spin-then-park strategy | |
568 while (--its >= 0) { | |
569 w = *Lock ; | |
570 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
571 return ; | |
572 } | |
573 } | |
574 | |
575 Self->reset() ; | |
576 Self->OnList = intptr_t(Lock) ; | |
577 // The following fence() isn't _strictly necessary as the subsequent | |
578 // CAS() both serializes execution and ratifies the fetched *Lock value. | |
579 OrderAccess::fence(); | |
580 for (;;) { | |
581 w = *Lock ; | |
582 if ((w & LOCKBIT) == 0) { | |
583 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
584 Self->OnList = 0 ; // hygiene - allows stronger asserts | |
585 return ; | |
586 } | |
587 continue ; // Interference -- *Lock changed -- Just retry | |
588 } | |
589 assert (w & LOCKBIT, "invariant") ; | |
590 Self->ListNext = (ParkEvent *) (w & ~LOCKBIT ); | |
591 if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ; | |
592 } | |
593 | |
594 while (Self->OnList != 0) { | |
595 Self->park() ; | |
596 } | |
597 } | |
598 } | |
599 | |
600 void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) { | |
601 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ; | |
602 if (w == 0) return ; | |
603 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
604 return ; | |
605 } | |
606 | |
607 TEVENT (muxAcquire - Contention) ; | |
608 ParkEvent * ReleaseAfter = NULL ; | |
609 if (ev == NULL) { | |
610 ev = ReleaseAfter = ParkEvent::Allocate (NULL) ; | |
611 } | |
612 assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ; | |
613 for (;;) { | |
614 guarantee (ev->OnList == 0, "invariant") ; | |
615 int its = (os::is_MP() ? 100 : 0) + 1 ; | |
616 | |
617 // Optional spin phase: spin-then-park strategy | |
618 while (--its >= 0) { | |
619 w = *Lock ; | |
620 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
621 if (ReleaseAfter != NULL) { | |
622 ParkEvent::Release (ReleaseAfter) ; | |
623 } | |
624 return ; | |
625 } | |
626 } | |
627 | |
628 ev->reset() ; | |
629 ev->OnList = intptr_t(Lock) ; | |
630 // The following fence() isn't _strictly necessary as the subsequent | |
631 // CAS() both serializes execution and ratifies the fetched *Lock value. | |
632 OrderAccess::fence(); | |
633 for (;;) { | |
634 w = *Lock ; | |
635 if ((w & LOCKBIT) == 0) { | |
636 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { | |
637 ev->OnList = 0 ; | |
638 // We call ::Release while holding the outer lock, thus | |
639 // artificially lengthening the critical section. | |
640 // Consider deferring the ::Release() until the subsequent unlock(), | |
641 // after we've dropped the outer lock. | |
642 if (ReleaseAfter != NULL) { | |
643 ParkEvent::Release (ReleaseAfter) ; | |
644 } | |
645 return ; | |
646 } | |
647 continue ; // Interference -- *Lock changed -- Just retry | |
648 } | |
649 assert (w & LOCKBIT, "invariant") ; | |
650 ev->ListNext = (ParkEvent *) (w & ~LOCKBIT ); | |
651 if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ; | |
652 } | |
653 | |
654 while (ev->OnList != 0) { | |
655 ev->park() ; | |
656 } | |
657 } | |
658 } | |
659 | |
660 // Release() must extract a successor from the list and then wake that thread. | |
661 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme | |
662 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based | |
663 // Release() would : | |
664 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list. | |
665 // (B) Extract a successor from the private list "in-hand" | |
666 // (C) attempt to CAS() the residual back into *Lock over null. | |
667 // If there were any newly arrived threads and the CAS() would fail. | |
668 // In that case Release() would detach the RATs, re-merge the list in-hand | |
669 // with the RATs and repeat as needed. Alternately, Release() might | |
670 // detach and extract a successor, but then pass the residual list to the wakee. | |
671 // The wakee would be responsible for reattaching and remerging before it | |
672 // competed for the lock. | |
673 // | |
674 // Both "pop" and DMR are immune from ABA corruption -- there can be | |
675 // multiple concurrent pushers, but only one popper or detacher. | |
676 // This implementation pops from the head of the list. This is unfair, | |
677 // but tends to provide excellent throughput as hot threads remain hot. | |
678 // (We wake recently run threads first). | |
679 | |
680 void Thread::muxRelease (volatile intptr_t * Lock) { | |
681 for (;;) { | |
682 const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ; | |
683 assert (w & LOCKBIT, "invariant") ; | |
684 if (w == LOCKBIT) return ; | |
685 ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ; | |
686 assert (List != NULL, "invariant") ; | |
687 assert (List->OnList == intptr_t(Lock), "invariant") ; | |
688 ParkEvent * nxt = List->ListNext ; | |
689 | |
690 // The following CAS() releases the lock and pops the head element. | |
691 if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) { | |
692 continue ; | |
693 } | |
694 List->OnList = 0 ; | |
695 OrderAccess::fence() ; | |
696 List->unpark () ; | |
697 return ; | |
698 } | |
699 } | |
700 | |
701 // ObjectMonitor Lifecycle | |
702 // ----------------------- | |
703 // Inflation unlinks monitors from the global gFreeList and | |
704 // associates them with objects. Deflation -- which occurs at | |
705 // STW-time -- disassociates idle monitors from objects. Such | |
706 // scavenged monitors are returned to the gFreeList. | |
707 // | |
708 // The global list is protected by ListLock. All the critical sections | |
709 // are short and operate in constant-time. | |
710 // | |
711 // ObjectMonitors reside in type-stable memory (TSM) and are immortal. | |
712 // | |
713 // Lifecycle: | |
714 // -- unassigned and on the global free list | |
715 // -- unassigned and on a thread's private omFreeList | |
716 // -- assigned to an object. The object is inflated and the mark refers | |
717 // to the objectmonitor. | |
718 // | |
719 // TODO-FIXME: | |
720 // | |
721 // * We currently protect the gFreeList with a simple lock. | |
722 // An alternate lock-free scheme would be to pop elements from the gFreeList | |
723 // with CAS. This would be safe from ABA corruption as long we only | |
724 // recycled previously appearing elements onto the list in deflate_idle_monitors() | |
725 // at STW-time. Completely new elements could always be pushed onto the gFreeList | |
726 // with CAS. Elements that appeared previously on the list could only | |
727 // be installed at STW-time. | |
728 // | |
729 // * For efficiency and to help reduce the store-before-CAS penalty | |
730 // the objectmonitors on gFreeList or local free lists should be ready to install | |
731 // with the exception of _header and _object. _object can be set after inflation. | |
732 // In particular, keep all objectMonitors on a thread's private list in ready-to-install | |
733 // state with m.Owner set properly. | |
734 // | |
735 // * We could all diffuse contention by using multiple global (FreeList, Lock) | |
736 // pairs -- threads could use trylock() and a cyclic-scan strategy to search for | |
737 // an unlocked free list. | |
738 // | |
739 // * Add lifecycle tags and assert()s. | |
740 // | |
741 // * Be more consistent about when we clear an objectmonitor's fields: | |
742 // A. After extracting the objectmonitor from a free list. | |
743 // B. After adding an objectmonitor to a free list. | |
744 // | |
745 | |
746 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ; | |
747 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ; | |
748 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache | |
749 #define CHAINMARKER ((oop)-1) | |
750 | |
751 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) { | |
752 // A large MAXPRIVATE value reduces both list lock contention | |
753 // and list coherency traffic, but also tends to increase the | |
754 // number of objectMonitors in circulation as well as the STW | |
755 // scavenge costs. As usual, we lean toward time in space-time | |
756 // tradeoffs. | |
757 const int MAXPRIVATE = 1024 ; | |
758 for (;;) { | |
759 ObjectMonitor * m ; | |
760 | |
761 // 1: try to allocate from the thread's local omFreeList. | |
762 // Threads will attempt to allocate first from their local list, then | |
763 // from the global list, and only after those attempts fail will the thread | |
764 // attempt to instantiate new monitors. Thread-local free lists take | |
765 // heat off the ListLock and improve allocation latency, as well as reducing | |
766 // coherency traffic on the shared global list. | |
767 m = Self->omFreeList ; | |
768 if (m != NULL) { | |
769 Self->omFreeList = m->FreeNext ; | |
770 Self->omFreeCount -- ; | |
771 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene | |
772 guarantee (m->object() == NULL, "invariant") ; | |
773 return m ; | |
774 } | |
775 | |
776 // 2: try to allocate from the global gFreeList | |
777 // CONSIDER: use muxTry() instead of muxAcquire(). | |
778 // If the muxTry() fails then drop immediately into case 3. | |
779 // If we're using thread-local free lists then try | |
780 // to reprovision the caller's free list. | |
781 if (gFreeList != NULL) { | |
782 // Reprovision the thread's omFreeList. | |
783 // Use bulk transfers to reduce the allocation rate and heat | |
784 // on various locks. | |
785 Thread::muxAcquire (&ListLock, "omAlloc") ; | |
786 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) { | |
787 ObjectMonitor * take = gFreeList ; | |
788 gFreeList = take->FreeNext ; | |
789 guarantee (take->object() == NULL, "invariant") ; | |
790 guarantee (!take->is_busy(), "invariant") ; | |
791 take->Recycle() ; | |
792 omRelease (Self, take) ; | |
793 } | |
794 Thread::muxRelease (&ListLock) ; | |
795 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ; | |
796 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ; | |
797 TEVENT (omFirst - reprovision) ; | |
798 continue ; | |
799 } | |
800 | |
801 // 3: allocate a block of new ObjectMonitors | |
802 // Both the local and global free lists are empty -- resort to malloc(). | |
803 // In the current implementation objectMonitors are TSM - immortal. | |
804 assert (_BLOCKSIZE > 1, "invariant") ; | |
805 ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE]; | |
806 | |
807 // NOTE: (almost) no way to recover if allocation failed. | |
808 // We might be able to induce a STW safepoint and scavenge enough | |
809 // objectMonitors to permit progress. | |
810 if (temp == NULL) { | |
811 vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ; | |
812 } | |
813 | |
814 // Format the block. | |
815 // initialize the linked list, each monitor points to its next | |
816 // forming the single linked free list, the very first monitor | |
817 // will points to next block, which forms the block list. | |
818 // The trick of using the 1st element in the block as gBlockList | |
819 // linkage should be reconsidered. A better implementation would | |
820 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } | |
821 | |
822 for (int i = 1; i < _BLOCKSIZE ; i++) { | |
823 temp[i].FreeNext = &temp[i+1]; | |
824 } | |
825 | |
826 // terminate the last monitor as the end of list | |
827 temp[_BLOCKSIZE - 1].FreeNext = NULL ; | |
828 | |
829 // Element [0] is reserved for global list linkage | |
830 temp[0].set_object(CHAINMARKER); | |
831 | |
832 // Consider carving out this thread's current request from the | |
833 // block in hand. This avoids some lock traffic and redundant | |
834 // list activity. | |
835 | |
836 // Acquire the ListLock to manipulate BlockList and FreeList. | |
837 // An Oyama-Taura-Yonezawa scheme might be more efficient. | |
838 Thread::muxAcquire (&ListLock, "omAlloc [2]") ; | |
839 | |
840 // Add the new block to the list of extant blocks (gBlockList). | |
841 // The very first objectMonitor in a block is reserved and dedicated. | |
842 // It serves as blocklist "next" linkage. | |
843 temp[0].FreeNext = gBlockList; | |
844 gBlockList = temp; | |
845 | |
846 // Add the new string of objectMonitors to the global free list | |
847 temp[_BLOCKSIZE - 1].FreeNext = gFreeList ; | |
848 gFreeList = temp + 1; | |
849 Thread::muxRelease (&ListLock) ; | |
850 TEVENT (Allocate block of monitors) ; | |
851 } | |
852 } | |
853 | |
854 // Place "m" on the caller's private per-thread omFreeList. | |
855 // In practice there's no need to clamp or limit the number of | |
856 // monitors on a thread's omFreeList as the only time we'll call | |
857 // omRelease is to return a monitor to the free list after a CAS | |
858 // attempt failed. This doesn't allow unbounded #s of monitors to | |
859 // accumulate on a thread's free list. | |
860 // | |
861 // In the future the usage of omRelease() might change and monitors | |
862 // could migrate between free lists. In that case to avoid excessive | |
863 // accumulation we could limit omCount to (omProvision*2), otherwise return | |
864 // the objectMonitor to the global list. We should drain (return) in reasonable chunks. | |
865 // That is, *not* one-at-a-time. | |
866 | |
867 | |
868 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) { | |
869 guarantee (m->object() == NULL, "invariant") ; | |
870 m->FreeNext = Self->omFreeList ; | |
871 Self->omFreeList = m ; | |
872 Self->omFreeCount ++ ; | |
873 } | |
874 | |
875 // Return the monitors of a moribund thread's local free list to | |
876 // the global free list. Typically a thread calls omFlush() when | |
877 // it's dying. We could also consider having the VM thread steal | |
878 // monitors from threads that have not run java code over a few | |
879 // consecutive STW safepoints. Relatedly, we might decay | |
880 // omFreeProvision at STW safepoints. | |
881 // | |
882 // We currently call omFlush() from the Thread:: dtor _after the thread | |
883 // has been excised from the thread list and is no longer a mutator. | |
884 // That means that omFlush() can run concurrently with a safepoint and | |
885 // the scavenge operator. Calling omFlush() from JavaThread::exit() might | |
886 // be a better choice as we could safely reason that that the JVM is | |
887 // not at a safepoint at the time of the call, and thus there could | |
888 // be not inopportune interleavings between omFlush() and the scavenge | |
889 // operator. | |
890 | |
891 void ObjectSynchronizer::omFlush (Thread * Self) { | |
892 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL | |
893 Self->omFreeList = NULL ; | |
894 if (List == NULL) return ; | |
895 ObjectMonitor * Tail = NULL ; | |
896 ObjectMonitor * s ; | |
897 for (s = List ; s != NULL ; s = s->FreeNext) { | |
898 Tail = s ; | |
899 guarantee (s->object() == NULL, "invariant") ; | |
900 guarantee (!s->is_busy(), "invariant") ; | |
901 s->set_owner (NULL) ; // redundant but good hygiene | |
902 TEVENT (omFlush - Move one) ; | |
903 } | |
904 | |
905 guarantee (Tail != NULL && List != NULL, "invariant") ; | |
906 Thread::muxAcquire (&ListLock, "omFlush") ; | |
907 Tail->FreeNext = gFreeList ; | |
908 gFreeList = List ; | |
909 Thread::muxRelease (&ListLock) ; | |
910 TEVENT (omFlush) ; | |
911 } | |
912 | |
913 | |
914 // Get the next block in the block list. | |
915 static inline ObjectMonitor* next(ObjectMonitor* block) { | |
916 assert(block->object() == CHAINMARKER, "must be a block header"); | |
917 block = block->FreeNext ; | |
918 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); | |
919 return block; | |
920 } | |
921 | |
922 // Fast path code shared by multiple functions | |
923 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { | |
924 markOop mark = obj->mark(); | |
925 if (mark->has_monitor()) { | |
926 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); | |
927 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); | |
928 return mark->monitor(); | |
929 } | |
930 return ObjectSynchronizer::inflate(Thread::current(), obj); | |
931 } | |
932 | |
933 // Note that we could encounter some performance loss through false-sharing as | |
934 // multiple locks occupy the same $ line. Padding might be appropriate. | |
935 | |
936 #define NINFLATIONLOCKS 256 | |
937 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ; | |
938 | |
939 static markOop ReadStableMark (oop obj) { | |
940 markOop mark = obj->mark() ; | |
941 if (!mark->is_being_inflated()) { | |
942 return mark ; // normal fast-path return | |
943 } | |
944 | |
945 int its = 0 ; | |
946 for (;;) { | |
947 markOop mark = obj->mark() ; | |
948 if (!mark->is_being_inflated()) { | |
949 return mark ; // normal fast-path return | |
950 } | |
951 | |
952 // The object is being inflated by some other thread. | |
953 // The caller of ReadStableMark() must wait for inflation to complete. | |
954 // Avoid live-lock | |
955 // TODO: consider calling SafepointSynchronize::do_call_back() while | |
956 // spinning to see if there's a safepoint pending. If so, immediately | |
957 // yielding or blocking would be appropriate. Avoid spinning while | |
958 // there is a safepoint pending. | |
959 // TODO: add inflation contention performance counters. | |
960 // TODO: restrict the aggregate number of spinners. | |
961 | |
962 ++its ; | |
963 if (its > 10000 || !os::is_MP()) { | |
964 if (its & 1) { | |
965 os::NakedYield() ; | |
966 TEVENT (Inflate: INFLATING - yield) ; | |
967 } else { | |
968 // Note that the following code attenuates the livelock problem but is not | |
969 // a complete remedy. A more complete solution would require that the inflating | |
970 // thread hold the associated inflation lock. The following code simply restricts | |
971 // the number of spinners to at most one. We'll have N-2 threads blocked | |
972 // on the inflationlock, 1 thread holding the inflation lock and using | |
973 // a yield/park strategy, and 1 thread in the midst of inflation. | |
974 // A more refined approach would be to change the encoding of INFLATING | |
975 // to allow encapsulation of a native thread pointer. Threads waiting for | |
976 // inflation to complete would use CAS to push themselves onto a singly linked | |
977 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag | |
978 // and calling park(). When inflation was complete the thread that accomplished inflation | |
979 // would detach the list and set the markword to inflated with a single CAS and | |
980 // then for each thread on the list, set the flag and unpark() the thread. | |
981 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease | |
982 // wakes at most one thread whereas we need to wake the entire list. | |
983 int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ; | |
984 int YieldThenBlock = 0 ; | |
985 assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ; | |
986 assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ; | |
987 Thread::muxAcquire (InflationLocks + ix, "InflationLock") ; | |
988 while (obj->mark() == markOopDesc::INFLATING()) { | |
989 // Beware: NakedYield() is advisory and has almost no effect on some platforms | |
990 // so we periodically call Self->_ParkEvent->park(1). | |
991 // We use a mixed spin/yield/block mechanism. | |
992 if ((YieldThenBlock++) >= 16) { | |
993 Thread::current()->_ParkEvent->park(1) ; | |
994 } else { | |
995 os::NakedYield() ; | |
996 } | |
997 } | |
998 Thread::muxRelease (InflationLocks + ix ) ; | |
999 TEVENT (Inflate: INFLATING - yield/park) ; | |
1000 } | |
1001 } else { | |
1002 SpinPause() ; // SMP-polite spinning | |
1003 } | |
1004 } | |
1005 } | |
1006 | |
1007 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { | |
1008 // Inflate mutates the heap ... | |
1009 // Relaxing assertion for bug 6320749. | |
1010 assert (Universe::verify_in_progress() || | |
1011 !SafepointSynchronize::is_at_safepoint(), "invariant") ; | |
1012 | |
1013 for (;;) { | |
1014 const markOop mark = object->mark() ; | |
1015 assert (!mark->has_bias_pattern(), "invariant") ; | |
1016 | |
1017 // The mark can be in one of the following states: | |
1018 // * Inflated - just return | |
1019 // * Stack-locked - coerce it to inflated | |
1020 // * INFLATING - busy wait for conversion to complete | |
1021 // * Neutral - aggressively inflate the object. | |
1022 // * BIASED - Illegal. We should never see this | |
1023 | |
1024 // CASE: inflated | |
1025 if (mark->has_monitor()) { | |
1026 ObjectMonitor * inf = mark->monitor() ; | |
1027 assert (inf->header()->is_neutral(), "invariant"); | |
1028 assert (inf->object() == object, "invariant") ; | |
1029 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); | |
1030 return inf ; | |
1031 } | |
1032 | |
1033 // CASE: inflation in progress - inflating over a stack-lock. | |
1034 // Some other thread is converting from stack-locked to inflated. | |
1035 // Only that thread can complete inflation -- other threads must wait. | |
1036 // The INFLATING value is transient. | |
1037 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. | |
1038 // We could always eliminate polling by parking the thread on some auxiliary list. | |
1039 if (mark == markOopDesc::INFLATING()) { | |
1040 TEVENT (Inflate: spin while INFLATING) ; | |
1041 ReadStableMark(object) ; | |
1042 continue ; | |
1043 } | |
1044 | |
1045 // CASE: stack-locked | |
1046 // Could be stack-locked either by this thread or by some other thread. | |
1047 // | |
1048 // Note that we allocate the objectmonitor speculatively, _before_ attempting | |
1049 // to install INFLATING into the mark word. We originally installed INFLATING, | |
1050 // allocated the objectmonitor, and then finally STed the address of the | |
1051 // objectmonitor into the mark. This was correct, but artificially lengthened | |
1052 // the interval in which INFLATED appeared in the mark, thus increasing | |
1053 // the odds of inflation contention. | |
1054 // | |
1055 // We now use per-thread private objectmonitor free lists. | |
1056 // These list are reprovisioned from the global free list outside the | |
1057 // critical INFLATING...ST interval. A thread can transfer | |
1058 // multiple objectmonitors en-mass from the global free list to its local free list. | |
1059 // This reduces coherency traffic and lock contention on the global free list. | |
1060 // Using such local free lists, it doesn't matter if the omAlloc() call appears | |
1061 // before or after the CAS(INFLATING) operation. | |
1062 // See the comments in omAlloc(). | |
1063 | |
1064 if (mark->has_locker()) { | |
1065 ObjectMonitor * m = omAlloc (Self) ; | |
1066 // Optimistically prepare the objectmonitor - anticipate successful CAS | |
1067 // We do this before the CAS in order to minimize the length of time | |
1068 // in which INFLATING appears in the mark. | |
1069 m->Recycle(); | |
1070 m->FreeNext = NULL ; | |
1071 m->_Responsible = NULL ; | |
1072 m->OwnerIsThread = 0 ; | |
1073 m->_recursions = 0 ; | |
1074 m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class | |
1075 | |
1076 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; | |
1077 if (cmp != mark) { | |
1078 omRelease (Self, m) ; | |
1079 continue ; // Interference -- just retry | |
1080 } | |
1081 | |
1082 // We've successfully installed INFLATING (0) into the mark-word. | |
1083 // This is the only case where 0 will appear in a mark-work. | |
1084 // Only the singular thread that successfully swings the mark-word | |
1085 // to 0 can perform (or more precisely, complete) inflation. | |
1086 // | |
1087 // Why do we CAS a 0 into the mark-word instead of just CASing the | |
1088 // mark-word from the stack-locked value directly to the new inflated state? | |
1089 // Consider what happens when a thread unlocks a stack-locked object. | |
1090 // It attempts to use CAS to swing the displaced header value from the | |
1091 // on-stack basiclock back into the object header. Recall also that the | |
1092 // header value (hashcode, etc) can reside in (a) the object header, or | |
1093 // (b) a displaced header associated with the stack-lock, or (c) a displaced | |
1094 // header in an objectMonitor. The inflate() routine must copy the header | |
1095 // value from the basiclock on the owner's stack to the objectMonitor, all | |
1096 // the while preserving the hashCode stability invariants. If the owner | |
1097 // decides to release the lock while the value is 0, the unlock will fail | |
1098 // and control will eventually pass from slow_exit() to inflate. The owner | |
1099 // will then spin, waiting for the 0 value to disappear. Put another way, | |
1100 // the 0 causes the owner to stall if the owner happens to try to | |
1101 // drop the lock (restoring the header from the basiclock to the object) | |
1102 // while inflation is in-progress. This protocol avoids races that might | |
1103 // would otherwise permit hashCode values to change or "flicker" for an object. | |
1104 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. | |
1105 // 0 serves as a "BUSY" inflate-in-progress indicator. | |
1106 | |
1107 | |
1108 // fetch the displaced mark from the owner's stack. | |
1109 // The owner can't die or unwind past the lock while our INFLATING | |
1110 // object is in the mark. Furthermore the owner can't complete | |
1111 // an unlock on the object, either. | |
1112 markOop dmw = mark->displaced_mark_helper() ; | |
1113 assert (dmw->is_neutral(), "invariant") ; | |
1114 | |
1115 // Setup monitor fields to proper values -- prepare the monitor | |
1116 m->set_header(dmw) ; | |
1117 | |
1118 // Optimization: if the mark->locker stack address is associated | |
1119 // with this thread we could simply set m->_owner = Self and | |
702
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1120 // m->OwnerIsThread = 1. Note that a thread can inflate an object |
0 | 1121 // that it has stack-locked -- as might happen in wait() -- directly |
1122 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. | |
702
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1123 m->set_owner(mark->locker()); |
0 | 1124 m->set_object(object); |
1125 // TODO-FIXME: assert BasicLock->dhw != 0. | |
1126 | |
1127 // Must preserve store ordering. The monitor state must | |
1128 // be stable at the time of publishing the monitor address. | |
1129 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ; | |
1130 object->release_set_mark(markOopDesc::encode(m)); | |
1131 | |
1132 // Hopefully the performance counters are allocated on distinct cache lines | |
1133 // to avoid false sharing on MP systems ... | |
1134 if (_sync_Inflations != NULL) _sync_Inflations->inc() ; | |
1135 TEVENT(Inflate: overwrite stacklock) ; | |
1136 if (TraceMonitorInflation) { | |
1137 if (object->is_instance()) { | |
1138 ResourceMark rm; | |
1139 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
1140 (intptr_t) object, (intptr_t) object->mark(), | |
1141 Klass::cast(object->klass())->external_name()); | |
1142 } | |
1143 } | |
1144 return m ; | |
1145 } | |
1146 | |
1147 // CASE: neutral | |
1148 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. | |
1149 // If we know we're inflating for entry it's better to inflate by swinging a | |
1150 // pre-locked objectMonitor pointer into the object header. A successful | |
1151 // CAS inflates the object *and* confers ownership to the inflating thread. | |
1152 // In the current implementation we use a 2-step mechanism where we CAS() | |
1153 // to inflate and then CAS() again to try to swing _owner from NULL to Self. | |
1154 // An inflateTry() method that we could call from fast_enter() and slow_enter() | |
1155 // would be useful. | |
1156 | |
1157 assert (mark->is_neutral(), "invariant"); | |
1158 ObjectMonitor * m = omAlloc (Self) ; | |
1159 // prepare m for installation - set monitor to initial state | |
1160 m->Recycle(); | |
1161 m->set_header(mark); | |
1162 m->set_owner(NULL); | |
1163 m->set_object(object); | |
1164 m->OwnerIsThread = 1 ; | |
1165 m->_recursions = 0 ; | |
1166 m->FreeNext = NULL ; | |
1167 m->_Responsible = NULL ; | |
1168 m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class | |
1169 | |
1170 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { | |
1171 m->set_object (NULL) ; | |
1172 m->set_owner (NULL) ; | |
1173 m->OwnerIsThread = 0 ; | |
1174 m->Recycle() ; | |
1175 omRelease (Self, m) ; | |
1176 m = NULL ; | |
1177 continue ; | |
1178 // interference - the markword changed - just retry. | |
1179 // The state-transitions are one-way, so there's no chance of | |
1180 // live-lock -- "Inflated" is an absorbing state. | |
1181 } | |
1182 | |
1183 // Hopefully the performance counters are allocated on distinct | |
1184 // cache lines to avoid false sharing on MP systems ... | |
1185 if (_sync_Inflations != NULL) _sync_Inflations->inc() ; | |
1186 TEVENT(Inflate: overwrite neutral) ; | |
1187 if (TraceMonitorInflation) { | |
1188 if (object->is_instance()) { | |
1189 ResourceMark rm; | |
1190 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
1191 (intptr_t) object, (intptr_t) object->mark(), | |
1192 Klass::cast(object->klass())->external_name()); | |
1193 } | |
1194 } | |
1195 return m ; | |
1196 } | |
1197 } | |
1198 | |
1199 | |
1200 // This the fast monitor enter. The interpreter and compiler use | |
1201 // some assembly copies of this code. Make sure update those code | |
1202 // if the following function is changed. The implementation is | |
1203 // extremely sensitive to race condition. Be careful. | |
1204 | |
1205 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) { | |
1206 if (UseBiasedLocking) { | |
1207 if (!SafepointSynchronize::is_at_safepoint()) { | |
1208 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); | |
1209 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { | |
1210 return; | |
1211 } | |
1212 } else { | |
1213 assert(!attempt_rebias, "can not rebias toward VM thread"); | |
1214 BiasedLocking::revoke_at_safepoint(obj); | |
1215 } | |
1216 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
702
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1217 } |
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1218 |
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1219 slow_enter (obj, lock, THREAD) ; |
0 | 1220 } |
1221 | |
1222 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { | |
1223 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); | |
1224 // if displaced header is null, the previous enter is recursive enter, no-op | |
1225 markOop dhw = lock->displaced_header(); | |
1226 markOop mark ; | |
1227 if (dhw == NULL) { | |
1228 // Recursive stack-lock. | |
1229 // Diagnostics -- Could be: stack-locked, inflating, inflated. | |
1230 mark = object->mark() ; | |
1231 assert (!mark->is_neutral(), "invariant") ; | |
1232 if (mark->has_locker() && mark != markOopDesc::INFLATING()) { | |
1233 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ; | |
1234 } | |
1235 if (mark->has_monitor()) { | |
1236 ObjectMonitor * m = mark->monitor() ; | |
1237 assert(((oop)(m->object()))->mark() == mark, "invariant") ; | |
1238 assert(m->is_entered(THREAD), "invariant") ; | |
1239 } | |
1240 return ; | |
1241 } | |
1242 | |
1243 mark = object->mark() ; | |
1244 | |
1245 // If the object is stack-locked by the current thread, try to | |
1246 // swing the displaced header from the box back to the mark. | |
1247 if (mark == (markOop) lock) { | |
1248 assert (dhw->is_neutral(), "invariant") ; | |
1249 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { | |
1250 TEVENT (fast_exit: release stacklock) ; | |
1251 return; | |
1252 } | |
1253 } | |
1254 | |
1255 ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; | |
1256 } | |
1257 | |
1258 // This routine is used to handle interpreter/compiler slow case | |
1259 // We don't need to use fast path here, because it must have been | |
1260 // failed in the interpreter/compiler code. | |
1261 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { | |
1262 markOop mark = obj->mark(); | |
1263 assert(!mark->has_bias_pattern(), "should not see bias pattern here"); | |
1264 | |
1265 if (mark->is_neutral()) { | |
1266 // Anticipate successful CAS -- the ST of the displaced mark must | |
1267 // be visible <= the ST performed by the CAS. | |
1268 lock->set_displaced_header(mark); | |
1269 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { | |
1270 TEVENT (slow_enter: release stacklock) ; | |
1271 return ; | |
1272 } | |
1273 // Fall through to inflate() ... | |
1274 } else | |
1275 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { | |
1276 assert(lock != mark->locker(), "must not re-lock the same lock"); | |
1277 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); | |
1278 lock->set_displaced_header(NULL); | |
1279 return; | |
1280 } | |
1281 | |
1282 #if 0 | |
1283 // The following optimization isn't particularly useful. | |
1284 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { | |
1285 lock->set_displaced_header (NULL) ; | |
1286 return ; | |
1287 } | |
1288 #endif | |
1289 | |
1290 // The object header will never be displaced to this lock, | |
1291 // so it does not matter what the value is, except that it | |
1292 // must be non-zero to avoid looking like a re-entrant lock, | |
1293 // and must not look locked either. | |
1294 lock->set_displaced_header(markOopDesc::unused_mark()); | |
1295 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); | |
1296 } | |
1297 | |
1298 // This routine is used to handle interpreter/compiler slow case | |
1299 // We don't need to use fast path here, because it must have | |
1300 // failed in the interpreter/compiler code. Simply use the heavy | |
1301 // weight monitor should be ok, unless someone find otherwise. | |
1302 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { | |
1303 fast_exit (object, lock, THREAD) ; | |
1304 } | |
1305 | |
1306 // NOTE: must use heavy weight monitor to handle jni monitor enter | |
1307 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter | |
1308 // the current locking is from JNI instead of Java code | |
1309 TEVENT (jni_enter) ; | |
1310 if (UseBiasedLocking) { | |
1311 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1312 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1313 } | |
1314 THREAD->set_current_pending_monitor_is_from_java(false); | |
1315 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); | |
1316 THREAD->set_current_pending_monitor_is_from_java(true); | |
1317 } | |
1318 | |
1319 // NOTE: must use heavy weight monitor to handle jni monitor enter | |
1320 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) { | |
1321 if (UseBiasedLocking) { | |
1322 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1323 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1324 } | |
1325 | |
1326 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj()); | |
1327 return monitor->try_enter(THREAD); | |
1328 } | |
1329 | |
1330 | |
1331 // NOTE: must use heavy weight monitor to handle jni monitor exit | |
1332 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { | |
1333 TEVENT (jni_exit) ; | |
1334 if (UseBiasedLocking) { | |
1335 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1336 } | |
1337 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1338 | |
1339 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj); | |
1340 // If this thread has locked the object, exit the monitor. Note: can't use | |
1341 // monitor->check(CHECK); must exit even if an exception is pending. | |
1342 if (monitor->check(THREAD)) { | |
1343 monitor->exit(THREAD); | |
1344 } | |
1345 } | |
1346 | |
1347 // complete_exit()/reenter() are used to wait on a nested lock | |
1348 // i.e. to give up an outer lock completely and then re-enter | |
1349 // Used when holding nested locks - lock acquisition order: lock1 then lock2 | |
1350 // 1) complete_exit lock1 - saving recursion count | |
1351 // 2) wait on lock2 | |
1352 // 3) when notified on lock2, unlock lock2 | |
1353 // 4) reenter lock1 with original recursion count | |
1354 // 5) lock lock2 | |
1355 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() | |
1356 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { | |
1357 TEVENT (complete_exit) ; | |
1358 if (UseBiasedLocking) { | |
1359 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1360 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1361 } | |
1362 | |
1363 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); | |
1364 | |
1365 return monitor->complete_exit(THREAD); | |
1366 } | |
1367 | |
1368 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() | |
1369 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { | |
1370 TEVENT (reenter) ; | |
1371 if (UseBiasedLocking) { | |
1372 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1373 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1374 } | |
1375 | |
1376 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); | |
1377 | |
1378 monitor->reenter(recursion, THREAD); | |
1379 } | |
1380 | |
1381 // This exists only as a workaround of dtrace bug 6254741 | |
1382 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { | |
1383 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); | |
1384 return 0; | |
1385 } | |
1386 | |
1387 // NOTE: must use heavy weight monitor to handle wait() | |
1388 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { | |
1389 if (UseBiasedLocking) { | |
1390 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1391 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1392 } | |
1393 if (millis < 0) { | |
1394 TEVENT (wait - throw IAX) ; | |
1395 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); | |
1396 } | |
1397 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); | |
1398 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); | |
1399 monitor->wait(millis, true, THREAD); | |
1400 | |
1401 /* This dummy call is in place to get around dtrace bug 6254741. Once | |
1402 that's fixed we can uncomment the following line and remove the call */ | |
1403 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); | |
1404 dtrace_waited_probe(monitor, obj, THREAD); | |
1405 } | |
1406 | |
1407 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) { | |
1408 if (UseBiasedLocking) { | |
1409 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1410 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1411 } | |
1412 if (millis < 0) { | |
1413 TEVENT (wait - throw IAX) ; | |
1414 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); | |
1415 } | |
1416 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ; | |
1417 } | |
1418 | |
1419 void ObjectSynchronizer::notify(Handle obj, TRAPS) { | |
1420 if (UseBiasedLocking) { | |
1421 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1422 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1423 } | |
1424 | |
1425 markOop mark = obj->mark(); | |
1426 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { | |
1427 return; | |
1428 } | |
1429 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD); | |
1430 } | |
1431 | |
1432 // NOTE: see comment of notify() | |
1433 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { | |
1434 if (UseBiasedLocking) { | |
1435 BiasedLocking::revoke_and_rebias(obj, false, THREAD); | |
1436 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1437 } | |
1438 | |
1439 markOop mark = obj->mark(); | |
1440 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { | |
1441 return; | |
1442 } | |
1443 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD); | |
1444 } | |
1445 | |
1446 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) { | |
1447 if (UseBiasedLocking) { | |
1448 // NOTE: many places throughout the JVM do not expect a safepoint | |
1449 // to be taken here, in particular most operations on perm gen | |
1450 // objects. However, we only ever bias Java instances and all of | |
1451 // the call sites of identity_hash that might revoke biases have | |
1452 // been checked to make sure they can handle a safepoint. The | |
1453 // added check of the bias pattern is to avoid useless calls to | |
1454 // thread-local storage. | |
1455 if (obj->mark()->has_bias_pattern()) { | |
1456 // Box and unbox the raw reference just in case we cause a STW safepoint. | |
1457 Handle hobj (Self, obj) ; | |
1458 // Relaxing assertion for bug 6320749. | |
1459 assert (Universe::verify_in_progress() || | |
1460 !SafepointSynchronize::is_at_safepoint(), | |
1461 "biases should not be seen by VM thread here"); | |
1462 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); | |
1463 obj = hobj() ; | |
1464 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1465 } | |
1466 } | |
1467 | |
1468 // hashCode() is a heap mutator ... | |
1469 // Relaxing assertion for bug 6320749. | |
1470 assert (Universe::verify_in_progress() || | |
1471 !SafepointSynchronize::is_at_safepoint(), "invariant") ; | |
1472 assert (Universe::verify_in_progress() || | |
1473 Self->is_Java_thread() , "invariant") ; | |
1474 assert (Universe::verify_in_progress() || | |
1475 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; | |
1476 | |
1477 ObjectMonitor* monitor = NULL; | |
1478 markOop temp, test; | |
1479 intptr_t hash; | |
1480 markOop mark = ReadStableMark (obj); | |
1481 | |
1482 // object should remain ineligible for biased locking | |
1483 assert (!mark->has_bias_pattern(), "invariant") ; | |
1484 | |
1485 if (mark->is_neutral()) { | |
1486 hash = mark->hash(); // this is a normal header | |
1487 if (hash) { // if it has hash, just return it | |
1488 return hash; | |
1489 } | |
1490 hash = get_next_hash(Self, obj); // allocate a new hash code | |
1491 temp = mark->copy_set_hash(hash); // merge the hash code into header | |
1492 // use (machine word version) atomic operation to install the hash | |
1493 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark); | |
1494 if (test == mark) { | |
1495 return hash; | |
1496 } | |
1497 // If atomic operation failed, we must inflate the header | |
1498 // into heavy weight monitor. We could add more code here | |
1499 // for fast path, but it does not worth the complexity. | |
1500 } else if (mark->has_monitor()) { | |
1501 monitor = mark->monitor(); | |
1502 temp = monitor->header(); | |
1503 assert (temp->is_neutral(), "invariant") ; | |
1504 hash = temp->hash(); | |
1505 if (hash) { | |
1506 return hash; | |
1507 } | |
1508 // Skip to the following code to reduce code size | |
1509 } else if (Self->is_lock_owned((address)mark->locker())) { | |
1510 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned | |
1511 assert (temp->is_neutral(), "invariant") ; | |
1512 hash = temp->hash(); // by current thread, check if the displaced | |
1513 if (hash) { // header contains hash code | |
1514 return hash; | |
1515 } | |
1516 // WARNING: | |
1517 // The displaced header is strictly immutable. | |
1518 // It can NOT be changed in ANY cases. So we have | |
1519 // to inflate the header into heavyweight monitor | |
1520 // even the current thread owns the lock. The reason | |
1521 // is the BasicLock (stack slot) will be asynchronously | |
1522 // read by other threads during the inflate() function. | |
1523 // Any change to stack may not propagate to other threads | |
1524 // correctly. | |
1525 } | |
1526 | |
1527 // Inflate the monitor to set hash code | |
1528 monitor = ObjectSynchronizer::inflate(Self, obj); | |
1529 // Load displaced header and check it has hash code | |
1530 mark = monitor->header(); | |
1531 assert (mark->is_neutral(), "invariant") ; | |
1532 hash = mark->hash(); | |
1533 if (hash == 0) { | |
1534 hash = get_next_hash(Self, obj); | |
1535 temp = mark->copy_set_hash(hash); // merge hash code into header | |
1536 assert (temp->is_neutral(), "invariant") ; | |
1537 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); | |
1538 if (test != mark) { | |
1539 // The only update to the header in the monitor (outside GC) | |
1540 // is install the hash code. If someone add new usage of | |
1541 // displaced header, please update this code | |
1542 hash = test->hash(); | |
1543 assert (test->is_neutral(), "invariant") ; | |
1544 assert (hash != 0, "Trivial unexpected object/monitor header usage."); | |
1545 } | |
1546 } | |
1547 // We finally get the hash | |
1548 return hash; | |
1549 } | |
1550 | |
1551 // Deprecated -- use FastHashCode() instead. | |
1552 | |
1553 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { | |
1554 return FastHashCode (Thread::current(), obj()) ; | |
1555 } | |
1556 | |
1557 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, | |
1558 Handle h_obj) { | |
1559 if (UseBiasedLocking) { | |
1560 BiasedLocking::revoke_and_rebias(h_obj, false, thread); | |
1561 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1562 } | |
1563 | |
1564 assert(thread == JavaThread::current(), "Can only be called on current thread"); | |
1565 oop obj = h_obj(); | |
1566 | |
1567 markOop mark = ReadStableMark (obj) ; | |
1568 | |
1569 // Uncontended case, header points to stack | |
1570 if (mark->has_locker()) { | |
1571 return thread->is_lock_owned((address)mark->locker()); | |
1572 } | |
1573 // Contended case, header points to ObjectMonitor (tagged pointer) | |
1574 if (mark->has_monitor()) { | |
1575 ObjectMonitor* monitor = mark->monitor(); | |
1576 return monitor->is_entered(thread) != 0 ; | |
1577 } | |
1578 // Unlocked case, header in place | |
1579 assert(mark->is_neutral(), "sanity check"); | |
1580 return false; | |
1581 } | |
1582 | |
1583 // Be aware of this method could revoke bias of the lock object. | |
1584 // This method querys the ownership of the lock handle specified by 'h_obj'. | |
1585 // If the current thread owns the lock, it returns owner_self. If no | |
1586 // thread owns the lock, it returns owner_none. Otherwise, it will return | |
1587 // ower_other. | |
1588 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership | |
1589 (JavaThread *self, Handle h_obj) { | |
1590 // The caller must beware this method can revoke bias, and | |
1591 // revocation can result in a safepoint. | |
1592 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; | |
1593 assert (self->thread_state() != _thread_blocked , "invariant") ; | |
1594 | |
1595 // Possible mark states: neutral, biased, stack-locked, inflated | |
1596 | |
1597 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { | |
1598 // CASE: biased | |
1599 BiasedLocking::revoke_and_rebias(h_obj, false, self); | |
1600 assert(!h_obj->mark()->has_bias_pattern(), | |
1601 "biases should be revoked by now"); | |
1602 } | |
1603 | |
1604 assert(self == JavaThread::current(), "Can only be called on current thread"); | |
1605 oop obj = h_obj(); | |
1606 markOop mark = ReadStableMark (obj) ; | |
1607 | |
1608 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. | |
1609 if (mark->has_locker()) { | |
1610 return self->is_lock_owned((address)mark->locker()) ? | |
1611 owner_self : owner_other; | |
1612 } | |
1613 | |
1614 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor. | |
1615 // The Object:ObjectMonitor relationship is stable as long as we're | |
1616 // not at a safepoint. | |
1617 if (mark->has_monitor()) { | |
1618 void * owner = mark->monitor()->_owner ; | |
1619 if (owner == NULL) return owner_none ; | |
1620 return (owner == self || | |
1621 self->is_lock_owned((address)owner)) ? owner_self : owner_other; | |
1622 } | |
1623 | |
1624 // CASE: neutral | |
1625 assert(mark->is_neutral(), "sanity check"); | |
1626 return owner_none ; // it's unlocked | |
1627 } | |
1628 | |
1629 // FIXME: jvmti should call this | |
1630 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) { | |
1631 if (UseBiasedLocking) { | |
1632 if (SafepointSynchronize::is_at_safepoint()) { | |
1633 BiasedLocking::revoke_at_safepoint(h_obj); | |
1634 } else { | |
1635 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); | |
1636 } | |
1637 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); | |
1638 } | |
1639 | |
1640 oop obj = h_obj(); | |
1641 address owner = NULL; | |
1642 | |
1643 markOop mark = ReadStableMark (obj) ; | |
1644 | |
1645 // Uncontended case, header points to stack | |
1646 if (mark->has_locker()) { | |
1647 owner = (address) mark->locker(); | |
1648 } | |
1649 | |
1650 // Contended case, header points to ObjectMonitor (tagged pointer) | |
1651 if (mark->has_monitor()) { | |
1652 ObjectMonitor* monitor = mark->monitor(); | |
1653 assert(monitor != NULL, "monitor should be non-null"); | |
1654 owner = (address) monitor->owner(); | |
1655 } | |
1656 | |
1657 if (owner != NULL) { | |
1658 return Threads::owning_thread_from_monitor_owner(owner, doLock); | |
1659 } | |
1660 | |
1661 // Unlocked case, header in place | |
1662 // Cannot have assertion since this object may have been | |
1663 // locked by another thread when reaching here. | |
1664 // assert(mark->is_neutral(), "sanity check"); | |
1665 | |
1666 return NULL; | |
1667 } | |
1668 | |
1669 // Iterate through monitor cache and attempt to release thread's monitors | |
1670 // Gives up on a particular monitor if an exception occurs, but continues | |
1671 // the overall iteration, swallowing the exception. | |
1672 class ReleaseJavaMonitorsClosure: public MonitorClosure { | |
1673 private: | |
1674 TRAPS; | |
1675 | |
1676 public: | |
1677 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} | |
1678 void do_monitor(ObjectMonitor* mid) { | |
1679 if (mid->owner() == THREAD) { | |
1680 (void)mid->complete_exit(CHECK); | |
1681 } | |
1682 } | |
1683 }; | |
1684 | |
1685 // Release all inflated monitors owned by THREAD. Lightweight monitors are | |
1686 // ignored. This is meant to be called during JNI thread detach which assumes | |
1687 // all remaining monitors are heavyweight. All exceptions are swallowed. | |
1688 // Scanning the extant monitor list can be time consuming. | |
1689 // A simple optimization is to add a per-thread flag that indicates a thread | |
1690 // called jni_monitorenter() during its lifetime. | |
1691 // | |
1692 // Instead of No_Savepoint_Verifier it might be cheaper to | |
1693 // use an idiom of the form: | |
1694 // auto int tmp = SafepointSynchronize::_safepoint_counter ; | |
1695 // <code that must not run at safepoint> | |
1696 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; | |
1697 // Since the tests are extremely cheap we could leave them enabled | |
1698 // for normal product builds. | |
1699 | |
1700 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { | |
1701 assert(THREAD == JavaThread::current(), "must be current Java thread"); | |
1702 No_Safepoint_Verifier nsv ; | |
1703 ReleaseJavaMonitorsClosure rjmc(THREAD); | |
1704 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); | |
1705 ObjectSynchronizer::monitors_iterate(&rjmc); | |
1706 Thread::muxRelease(&ListLock); | |
1707 THREAD->clear_pending_exception(); | |
1708 } | |
1709 | |
1710 // Visitors ... | |
1711 | |
1712 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { | |
1713 ObjectMonitor* block = gBlockList; | |
1714 ObjectMonitor* mid; | |
1715 while (block) { | |
1716 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1717 for (int i = _BLOCKSIZE - 1; i > 0; i--) { | |
1718 mid = block + i; | |
1719 oop object = (oop) mid->object(); | |
1720 if (object != NULL) { | |
1721 closure->do_monitor(mid); | |
1722 } | |
1723 } | |
1724 block = (ObjectMonitor*) block->FreeNext; | |
1725 } | |
1726 } | |
1727 | |
1728 void ObjectSynchronizer::oops_do(OopClosure* f) { | |
1729 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); | |
1730 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { | |
1731 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1732 for (int i = 1; i < _BLOCKSIZE; i++) { | |
1733 ObjectMonitor* mid = &block[i]; | |
1734 if (mid->object() != NULL) { | |
1735 f->do_oop((oop*)mid->object_addr()); | |
1736 } | |
1737 } | |
1738 } | |
1739 } | |
1740 | |
1741 // Deflate_idle_monitors() is called at all safepoints, immediately | |
1742 // after all mutators are stopped, but before any objects have moved. | |
1743 // It traverses the list of known monitors, deflating where possible. | |
1744 // The scavenged monitor are returned to the monitor free list. | |
1745 // | |
1746 // Beware that we scavenge at *every* stop-the-world point. | |
1747 // Having a large number of monitors in-circulation negatively | |
1748 // impacts the performance of some applications (e.g., PointBase). | |
1749 // Broadly, we want to minimize the # of monitors in circulation. | |
1750 // Alternately, we could partition the active monitors into sub-lists | |
1751 // of those that need scanning and those that do not. | |
1752 // Specifically, we would add a new sub-list of objectmonitors | |
1753 // that are in-circulation and potentially active. deflate_idle_monitors() | |
1754 // would scan only that list. Other monitors could reside on a quiescent | |
1755 // list. Such sequestered monitors wouldn't need to be scanned by | |
1756 // deflate_idle_monitors(). omAlloc() would first check the global free list, | |
1757 // then the quiescent list, and, failing those, would allocate a new block. | |
1758 // Deflate_idle_monitors() would scavenge and move monitors to the | |
1759 // quiescent list. | |
1760 // | |
1761 // Perversely, the heap size -- and thus the STW safepoint rate -- | |
1762 // typically drives the scavenge rate. Large heaps can mean infrequent GC, | |
1763 // which in turn can mean large(r) numbers of objectmonitors in circulation. | |
1764 // This is an unfortunate aspect of this design. | |
1765 // | |
1766 // Another refinement would be to refrain from calling deflate_idle_monitors() | |
1767 // except at stop-the-world points associated with garbage collections. | |
1768 // | |
1769 // An even better solution would be to deflate on-the-fly, aggressively, | |
1770 // at monitorexit-time as is done in EVM's metalock or Relaxed Locks. | |
1771 | |
1772 void ObjectSynchronizer::deflate_idle_monitors() { | |
1773 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); | |
1774 int nInuse = 0 ; // currently associated with objects | |
1775 int nInCirculation = 0 ; // extant | |
1776 int nScavenged = 0 ; // reclaimed | |
1777 | |
1778 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors | |
1779 ObjectMonitor * FreeTail = NULL ; | |
1780 | |
1781 // Iterate over all extant monitors - Scavenge all idle monitors. | |
1782 TEVENT (deflate_idle_monitors) ; | |
1783 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { | |
1784 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1785 nInCirculation += _BLOCKSIZE ; | |
1786 for (int i = 1 ; i < _BLOCKSIZE; i++) { | |
1787 ObjectMonitor* mid = &block[i]; | |
1788 oop obj = (oop) mid->object(); | |
1789 | |
1790 if (obj == NULL) { | |
1791 // The monitor is not associated with an object. | |
1792 // The monitor should either be a thread-specific private | |
1793 // free list or the global free list. | |
1794 // obj == NULL IMPLIES mid->is_busy() == 0 | |
1795 guarantee (!mid->is_busy(), "invariant") ; | |
1796 continue ; | |
1797 } | |
1798 | |
1799 // Normal case ... The monitor is associated with obj. | |
1800 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ; | |
1801 guarantee (mid == obj->mark()->monitor(), "invariant"); | |
1802 guarantee (mid->header()->is_neutral(), "invariant"); | |
1803 | |
1804 if (mid->is_busy()) { | |
1805 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ; | |
1806 nInuse ++ ; | |
1807 } else { | |
1808 // Deflate the monitor if it is no longer being used | |
1809 // It's idle - scavenge and return to the global free list | |
1810 // plain old deflation ... | |
1811 TEVENT (deflate_idle_monitors - scavenge1) ; | |
1812 if (TraceMonitorInflation) { | |
1813 if (obj->is_instance()) { | |
1814 ResourceMark rm; | |
1815 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
1816 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name()); | |
1817 } | |
1818 } | |
1819 | |
1820 // Restore the header back to obj | |
1821 obj->release_set_mark(mid->header()); | |
1822 mid->clear(); | |
1823 | |
1824 assert (mid->object() == NULL, "invariant") ; | |
1825 | |
1826 // Move the object to the working free list defined by FreeHead,FreeTail. | |
1827 mid->FreeNext = NULL ; | |
1828 if (FreeHead == NULL) FreeHead = mid ; | |
1829 if (FreeTail != NULL) FreeTail->FreeNext = mid ; | |
1830 FreeTail = mid ; | |
1831 nScavenged ++ ; | |
1832 } | |
1833 } | |
1834 } | |
1835 | |
1836 // Move the scavenged monitors back to the global free list. | |
1837 // In theory we don't need the freelist lock as we're at a STW safepoint. | |
1838 // omAlloc() and omFree() can only be called while a thread is _not in safepoint state. | |
1839 // But it's remotely possible that omFlush() or release_monitors_owned_by_thread() | |
1840 // might be called while not at a global STW safepoint. In the interest of | |
1841 // safety we protect the following access with ListLock. | |
1842 // An even more conservative and prudent approach would be to guard | |
1843 // the main loop in scavenge_idle_monitors() with ListLock. | |
1844 if (FreeHead != NULL) { | |
1845 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ; | |
1846 assert (FreeTail->FreeNext == NULL, "invariant") ; | |
1847 // constant-time list splice - prepend scavenged segment to gFreeList | |
1848 Thread::muxAcquire (&ListLock, "scavenge - return") ; | |
1849 FreeTail->FreeNext = gFreeList ; | |
1850 gFreeList = FreeHead ; | |
1851 Thread::muxRelease (&ListLock) ; | |
1852 } | |
1853 | |
1854 if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ; | |
1855 if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation); | |
1856 | |
1857 // TODO: Add objectMonitor leak detection. | |
1858 // Audit/inventory the objectMonitors -- make sure they're all accounted for. | |
1859 GVars.stwRandom = os::random() ; | |
1860 GVars.stwCycle ++ ; | |
1861 } | |
1862 | |
1863 // A macro is used below because there may already be a pending | |
1864 // exception which should not abort the execution of the routines | |
1865 // which use this (which is why we don't put this into check_slow and | |
1866 // call it with a CHECK argument). | |
1867 | |
1868 #define CHECK_OWNER() \ | |
1869 do { \ | |
1870 if (THREAD != _owner) { \ | |
1871 if (THREAD->is_lock_owned((address) _owner)) { \ | |
1872 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \ | |
1873 _recursions = 0; \ | |
1874 OwnerIsThread = 1 ; \ | |
1875 } else { \ | |
1876 TEVENT (Throw IMSX) ; \ | |
1877 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \ | |
1878 } \ | |
1879 } \ | |
1880 } while (false) | |
1881 | |
1882 // TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator | |
1883 // interface with a simple FirstWaitingThread(), NextWaitingThread() interface. | |
1884 | |
1885 ObjectWaiter* ObjectMonitor::first_waiter() { | |
1886 return _WaitSet; | |
1887 } | |
1888 | |
1889 ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) { | |
1890 return o->_next; | |
1891 } | |
1892 | |
1893 Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) { | |
1894 return o->_thread; | |
1895 } | |
1896 | |
1897 // initialize the monitor, exception the semaphore, all other fields | |
1898 // are simple integers or pointers | |
1899 ObjectMonitor::ObjectMonitor() { | |
1900 _header = NULL; | |
1901 _count = 0; | |
1902 _waiters = 0, | |
1903 _recursions = 0; | |
1904 _object = NULL; | |
1905 _owner = NULL; | |
1906 _WaitSet = NULL; | |
1907 _WaitSetLock = 0 ; | |
1908 _Responsible = NULL ; | |
1909 _succ = NULL ; | |
1910 _cxq = NULL ; | |
1911 FreeNext = NULL ; | |
1912 _EntryList = NULL ; | |
1913 _SpinFreq = 0 ; | |
1914 _SpinClock = 0 ; | |
1915 OwnerIsThread = 0 ; | |
1916 } | |
1917 | |
1918 ObjectMonitor::~ObjectMonitor() { | |
1919 // TODO: Add asserts ... | |
1920 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 | |
1921 // _count == 0 _EntryList == NULL etc | |
1922 } | |
1923 | |
1924 intptr_t ObjectMonitor::is_busy() const { | |
1925 // TODO-FIXME: merge _count and _waiters. | |
1926 // TODO-FIXME: assert _owner == null implies _recursions = 0 | |
1927 // TODO-FIXME: assert _WaitSet != null implies _count > 0 | |
1928 return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ; | |
1929 } | |
1930 | |
1931 void ObjectMonitor::Recycle () { | |
1932 // TODO: add stronger asserts ... | |
1933 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 | |
1934 // _count == 0 EntryList == NULL | |
1935 // _recursions == 0 _WaitSet == NULL | |
1936 // TODO: assert (is_busy()|_recursions) == 0 | |
1937 _succ = NULL ; | |
1938 _EntryList = NULL ; | |
1939 _cxq = NULL ; | |
1940 _WaitSet = NULL ; | |
1941 _recursions = 0 ; | |
1942 _SpinFreq = 0 ; | |
1943 _SpinClock = 0 ; | |
1944 OwnerIsThread = 0 ; | |
1945 } | |
1946 | |
1947 // WaitSet management ... | |
1948 | |
1949 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { | |
1950 assert(node != NULL, "should not dequeue NULL node"); | |
1951 assert(node->_prev == NULL, "node already in list"); | |
1952 assert(node->_next == NULL, "node already in list"); | |
1953 // put node at end of queue (circular doubly linked list) | |
1954 if (_WaitSet == NULL) { | |
1955 _WaitSet = node; | |
1956 node->_prev = node; | |
1957 node->_next = node; | |
1958 } else { | |
1959 ObjectWaiter* head = _WaitSet ; | |
1960 ObjectWaiter* tail = head->_prev; | |
1961 assert(tail->_next == head, "invariant check"); | |
1962 tail->_next = node; | |
1963 head->_prev = node; | |
1964 node->_next = head; | |
1965 node->_prev = tail; | |
1966 } | |
1967 } | |
1968 | |
1969 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { | |
1970 // dequeue the very first waiter | |
1971 ObjectWaiter* waiter = _WaitSet; | |
1972 if (waiter) { | |
1973 DequeueSpecificWaiter(waiter); | |
1974 } | |
1975 return waiter; | |
1976 } | |
1977 | |
1978 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { | |
1979 assert(node != NULL, "should not dequeue NULL node"); | |
1980 assert(node->_prev != NULL, "node already removed from list"); | |
1981 assert(node->_next != NULL, "node already removed from list"); | |
1982 // when the waiter has woken up because of interrupt, | |
1983 // timeout or other spurious wake-up, dequeue the | |
1984 // waiter from waiting list | |
1985 ObjectWaiter* next = node->_next; | |
1986 if (next == node) { | |
1987 assert(node->_prev == node, "invariant check"); | |
1988 _WaitSet = NULL; | |
1989 } else { | |
1990 ObjectWaiter* prev = node->_prev; | |
1991 assert(prev->_next == node, "invariant check"); | |
1992 assert(next->_prev == node, "invariant check"); | |
1993 next->_prev = prev; | |
1994 prev->_next = next; | |
1995 if (_WaitSet == node) { | |
1996 _WaitSet = next; | |
1997 } | |
1998 } | |
1999 node->_next = NULL; | |
2000 node->_prev = NULL; | |
2001 } | |
2002 | |
2003 static char * kvGet (char * kvList, const char * Key) { | |
2004 if (kvList == NULL) return NULL ; | |
2005 size_t n = strlen (Key) ; | |
2006 char * Search ; | |
2007 for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { | |
2008 if (strncmp (Search, Key, n) == 0) { | |
2009 if (Search[n] == '=') return Search + n + 1 ; | |
2010 if (Search[n] == 0) return (char *) "1" ; | |
2011 } | |
2012 } | |
2013 return NULL ; | |
2014 } | |
2015 | |
2016 static int kvGetInt (char * kvList, const char * Key, int Default) { | |
2017 char * v = kvGet (kvList, Key) ; | |
2018 int rslt = v ? ::strtol (v, NULL, 0) : Default ; | |
2019 if (Knob_ReportSettings && v != NULL) { | |
2020 ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; | |
2021 ::fflush (stdout) ; | |
2022 } | |
2023 return rslt ; | |
2024 } | |
2025 | |
2026 // By convention we unlink a contending thread from EntryList|cxq immediately | |
2027 // after the thread acquires the lock in ::enter(). Equally, we could defer | |
2028 // unlinking the thread until ::exit()-time. | |
2029 | |
2030 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) | |
2031 { | |
2032 assert (_owner == Self, "invariant") ; | |
2033 assert (SelfNode->_thread == Self, "invariant") ; | |
2034 | |
2035 if (SelfNode->TState == ObjectWaiter::TS_ENTER) { | |
2036 // Normal case: remove Self from the DLL EntryList . | |
2037 // This is a constant-time operation. | |
2038 ObjectWaiter * nxt = SelfNode->_next ; | |
2039 ObjectWaiter * prv = SelfNode->_prev ; | |
2040 if (nxt != NULL) nxt->_prev = prv ; | |
2041 if (prv != NULL) prv->_next = nxt ; | |
2042 if (SelfNode == _EntryList ) _EntryList = nxt ; | |
2043 assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
2044 assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
2045 TEVENT (Unlink from EntryList) ; | |
2046 } else { | |
2047 guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; | |
2048 // Inopportune interleaving -- Self is still on the cxq. | |
2049 // This usually means the enqueue of self raced an exiting thread. | |
2050 // Normally we'll find Self near the front of the cxq, so | |
2051 // dequeueing is typically fast. If needbe we can accelerate | |
2052 // this with some MCS/CHL-like bidirectional list hints and advisory | |
2053 // back-links so dequeueing from the interior will normally operate | |
2054 // in constant-time. | |
2055 // Dequeue Self from either the head (with CAS) or from the interior | |
2056 // with a linear-time scan and normal non-atomic memory operations. | |
2057 // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList | |
2058 // and then unlink Self from EntryList. We have to drain eventually, | |
2059 // so it might as well be now. | |
2060 | |
2061 ObjectWaiter * v = _cxq ; | |
2062 assert (v != NULL, "invariant") ; | |
2063 if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { | |
2064 // The CAS above can fail from interference IFF a "RAT" arrived. | |
2065 // In that case Self must be in the interior and can no longer be | |
2066 // at the head of cxq. | |
2067 if (v == SelfNode) { | |
2068 assert (_cxq != v, "invariant") ; | |
2069 v = _cxq ; // CAS above failed - start scan at head of list | |
2070 } | |
2071 ObjectWaiter * p ; | |
2072 ObjectWaiter * q = NULL ; | |
2073 for (p = v ; p != NULL && p != SelfNode; p = p->_next) { | |
2074 q = p ; | |
2075 assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; | |
2076 } | |
2077 assert (v != SelfNode, "invariant") ; | |
2078 assert (p == SelfNode, "Node not found on cxq") ; | |
2079 assert (p != _cxq, "invariant") ; | |
2080 assert (q != NULL, "invariant") ; | |
2081 assert (q->_next == p, "invariant") ; | |
2082 q->_next = p->_next ; | |
2083 } | |
2084 TEVENT (Unlink from cxq) ; | |
2085 } | |
2086 | |
2087 // Diagnostic hygiene ... | |
2088 SelfNode->_prev = (ObjectWaiter *) 0xBAD ; | |
2089 SelfNode->_next = (ObjectWaiter *) 0xBAD ; | |
2090 SelfNode->TState = ObjectWaiter::TS_RUN ; | |
2091 } | |
2092 | |
2093 // Caveat: TryLock() is not necessarily serializing if it returns failure. | |
2094 // Callers must compensate as needed. | |
2095 | |
2096 int ObjectMonitor::TryLock (Thread * Self) { | |
2097 for (;;) { | |
2098 void * own = _owner ; | |
2099 if (own != NULL) return 0 ; | |
2100 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { | |
2101 // Either guarantee _recursions == 0 or set _recursions = 0. | |
2102 assert (_recursions == 0, "invariant") ; | |
2103 assert (_owner == Self, "invariant") ; | |
2104 // CONSIDER: set or assert that OwnerIsThread == 1 | |
2105 return 1 ; | |
2106 } | |
2107 // The lock had been free momentarily, but we lost the race to the lock. | |
2108 // Interference -- the CAS failed. | |
2109 // We can either return -1 or retry. | |
2110 // Retry doesn't make as much sense because the lock was just acquired. | |
2111 if (true) return -1 ; | |
2112 } | |
2113 } | |
2114 | |
2115 // NotRunnable() -- informed spinning | |
2116 // | |
2117 // Don't bother spinning if the owner is not eligible to drop the lock. | |
2118 // Peek at the owner's schedctl.sc_state and Thread._thread_values and | |
2119 // spin only if the owner thread is _thread_in_Java or _thread_in_vm. | |
2120 // The thread must be runnable in order to drop the lock in timely fashion. | |
2121 // If the _owner is not runnable then spinning will not likely be | |
2122 // successful (profitable). | |
2123 // | |
2124 // Beware -- the thread referenced by _owner could have died | |
2125 // so a simply fetch from _owner->_thread_state might trap. | |
2126 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. | |
2127 // Because of the lifecycle issues the schedctl and _thread_state values | |
2128 // observed by NotRunnable() might be garbage. NotRunnable must | |
2129 // tolerate this and consider the observed _thread_state value | |
2130 // as advisory. | |
2131 // | |
2132 // Beware too, that _owner is sometimes a BasicLock address and sometimes | |
2133 // a thread pointer. We differentiate the two cases with OwnerIsThread. | |
2134 // Alternately, we might tag the type (thread pointer vs basiclock pointer) | |
2135 // with the LSB of _owner. Another option would be to probablistically probe | |
2136 // the putative _owner->TypeTag value. | |
2137 // | |
2138 // Checking _thread_state isn't perfect. Even if the thread is | |
2139 // in_java it might be blocked on a page-fault or have been preempted | |
2140 // and sitting on a ready/dispatch queue. _thread state in conjunction | |
2141 // with schedctl.sc_state gives us a good picture of what the | |
2142 // thread is doing, however. | |
2143 // | |
2144 // TODO: check schedctl.sc_state. | |
2145 // We'll need to use SafeFetch32() to read from the schedctl block. | |
2146 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ | |
2147 // | |
2148 // The return value from NotRunnable() is *advisory* -- the | |
2149 // result is based on sampling and is not necessarily coherent. | |
2150 // The caller must tolerate false-negative and false-positive errors. | |
2151 // Spinning, in general, is probabilistic anyway. | |
2152 | |
2153 | |
2154 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { | |
2155 // Check either OwnerIsThread or ox->TypeTag == 2BAD. | |
2156 if (!OwnerIsThread) return 0 ; | |
2157 | |
2158 if (ox == NULL) return 0 ; | |
2159 | |
2160 // Avoid transitive spinning ... | |
2161 // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. | |
2162 // Immediately after T1 acquires L it's possible that T2, also | |
2163 // spinning on L, will see L.Owner=T1 and T1._Stalled=L. | |
2164 // This occurs transiently after T1 acquired L but before | |
2165 // T1 managed to clear T1.Stalled. T2 does not need to abort | |
2166 // its spin in this circumstance. | |
2167 intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; | |
2168 | |
2169 if (BlockedOn == 1) return 1 ; | |
2170 if (BlockedOn != 0) { | |
2171 return BlockedOn != intptr_t(this) && _owner == ox ; | |
2172 } | |
2173 | |
2174 assert (sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant") ; | |
2175 int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ; ; | |
2176 // consider also: jst != _thread_in_Java -- but that's overspecific. | |
2177 return jst == _thread_blocked || jst == _thread_in_native ; | |
2178 } | |
2179 | |
2180 | |
2181 // Adaptive spin-then-block - rational spinning | |
2182 // | |
2183 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS | |
2184 // algorithm. On high order SMP systems it would be better to start with | |
2185 // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH, | |
2186 // a contending thread could enqueue itself on the cxq and then spin locally | |
2187 // on a thread-specific variable such as its ParkEvent._Event flag. | |
2188 // That's left as an exercise for the reader. Note that global spinning is | |
2189 // not problematic on Niagara, as the L2$ serves the interconnect and has both | |
2190 // low latency and massive bandwidth. | |
2191 // | |
2192 // Broadly, we can fix the spin frequency -- that is, the % of contended lock | |
2193 // acquisition attempts where we opt to spin -- at 100% and vary the spin count | |
2194 // (duration) or we can fix the count at approximately the duration of | |
2195 // a context switch and vary the frequency. Of course we could also | |
2196 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor. | |
2197 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html. | |
2198 // | |
2199 // This implementation varies the duration "D", where D varies with | |
2200 // the success rate of recent spin attempts. (D is capped at approximately | |
2201 // length of a round-trip context switch). The success rate for recent | |
2202 // spin attempts is a good predictor of the success rate of future spin | |
2203 // attempts. The mechanism adapts automatically to varying critical | |
2204 // section length (lock modality), system load and degree of parallelism. | |
2205 // D is maintained per-monitor in _SpinDuration and is initialized | |
2206 // optimistically. Spin frequency is fixed at 100%. | |
2207 // | |
2208 // Note that _SpinDuration is volatile, but we update it without locks | |
2209 // or atomics. The code is designed so that _SpinDuration stays within | |
2210 // a reasonable range even in the presence of races. The arithmetic | |
2211 // operations on _SpinDuration are closed over the domain of legal values, | |
2212 // so at worst a race will install and older but still legal value. | |
2213 // At the very worst this introduces some apparent non-determinism. | |
2214 // We might spin when we shouldn't or vice-versa, but since the spin | |
2215 // count are relatively short, even in the worst case, the effect is harmless. | |
2216 // | |
2217 // Care must be taken that a low "D" value does not become an | |
2218 // an absorbing state. Transient spinning failures -- when spinning | |
2219 // is overall profitable -- should not cause the system to converge | |
2220 // on low "D" values. We want spinning to be stable and predictable | |
2221 // and fairly responsive to change and at the same time we don't want | |
2222 // it to oscillate, become metastable, be "too" non-deterministic, | |
2223 // or converge on or enter undesirable stable absorbing states. | |
2224 // | |
2225 // We implement a feedback-based control system -- using past behavior | |
2226 // to predict future behavior. We face two issues: (a) if the | |
2227 // input signal is random then the spin predictor won't provide optimal | |
2228 // results, and (b) if the signal frequency is too high then the control | |
2229 // system, which has some natural response lag, will "chase" the signal. | |
2230 // (b) can arise from multimodal lock hold times. Transient preemption | |
2231 // can also result in apparent bimodal lock hold times. | |
2232 // Although sub-optimal, neither condition is particularly harmful, as | |
2233 // in the worst-case we'll spin when we shouldn't or vice-versa. | |
2234 // The maximum spin duration is rather short so the failure modes aren't bad. | |
2235 // To be conservative, I've tuned the gain in system to bias toward | |
2236 // _not spinning. Relatedly, the system can sometimes enter a mode where it | |
2237 // "rings" or oscillates between spinning and not spinning. This happens | |
2238 // when spinning is just on the cusp of profitability, however, so the | |
2239 // situation is not dire. The state is benign -- there's no need to add | |
2240 // hysteresis control to damp the transition rate between spinning and | |
2241 // not spinning. | |
2242 // | |
2243 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - | |
2244 // | |
2245 // Spin-then-block strategies ... | |
2246 // | |
2247 // Thoughts on ways to improve spinning : | |
2248 // | |
2249 // * Periodically call {psr_}getloadavg() while spinning, and | |
2250 // permit unbounded spinning if the load average is < | |
2251 // the number of processors. Beware, however, that getloadavg() | |
2252 // is exceptionally fast on solaris (about 1/10 the cost of a full | |
2253 // spin cycle, but quite expensive on linux. Beware also, that | |
2254 // multiple JVMs could "ring" or oscillate in a feedback loop. | |
2255 // Sufficient damping would solve that problem. | |
2256 // | |
2257 // * We currently use spin loops with iteration counters to approximate | |
2258 // spinning for some interval. Given the availability of high-precision | |
2259 // time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should | |
2260 // someday reimplement the spin loops to duration-based instead of iteration-based. | |
2261 // | |
2262 // * Don't spin if there are more than N = (CPUs/2) threads | |
2263 // currently spinning on the monitor (or globally). | |
2264 // That is, limit the number of concurrent spinners. | |
2265 // We might also limit the # of spinners in the JVM, globally. | |
2266 // | |
2267 // * If a spinning thread observes _owner change hands it should | |
2268 // abort the spin (and park immediately) or at least debit | |
2269 // the spin counter by a large "penalty". | |
2270 // | |
2271 // * Classically, the spin count is either K*(CPUs-1) or is a | |
2272 // simple constant that approximates the length of a context switch. | |
2273 // We currently use a value -- computed by a special utility -- that | |
2274 // approximates round-trip context switch times. | |
2275 // | |
2276 // * Normally schedctl_start()/_stop() is used to advise the kernel | |
2277 // to avoid preempting threads that are running in short, bounded | |
2278 // critical sections. We could use the schedctl hooks in an inverted | |
2279 // sense -- spinners would set the nopreempt flag, but poll the preempt | |
2280 // pending flag. If a spinner observed a pending preemption it'd immediately | |
2281 // abort the spin and park. As such, the schedctl service acts as | |
2282 // a preemption warning mechanism. | |
2283 // | |
2284 // * In lieu of spinning, if the system is running below saturation | |
2285 // (that is, loadavg() << #cpus), we can instead suppress futile | |
2286 // wakeup throttling, or even wake more than one successor at exit-time. | |
2287 // The net effect is largely equivalent to spinning. In both cases, | |
2288 // contending threads go ONPROC and opportunistically attempt to acquire | |
2289 // the lock, decreasing lock handover latency at the expense of wasted | |
2290 // cycles and context switching. | |
2291 // | |
2292 // * We might to spin less after we've parked as the thread will | |
2293 // have less $ and TLB affinity with the processor. | |
2294 // Likewise, we might spin less if we come ONPROC on a different | |
2295 // processor or after a long period (>> rechose_interval). | |
2296 // | |
2297 // * A table-driven state machine similar to Solaris' dispadmin scheduling | |
2298 // tables might be a better design. Instead of encoding information in | |
2299 // _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit, | |
2300 // discrete states. Success or failure during a spin would drive | |
2301 // state transitions, and each state node would contain a spin count. | |
2302 // | |
2303 // * If the processor is operating in a mode intended to conserve power | |
2304 // (such as Intel's SpeedStep) or to reduce thermal output (thermal | |
2305 // step-down mode) then the Java synchronization subsystem should | |
2306 // forgo spinning. | |
2307 // | |
2308 // * The minimum spin duration should be approximately the worst-case | |
2309 // store propagation latency on the platform. That is, the time | |
2310 // it takes a store on CPU A to become visible on CPU B, where A and | |
2311 // B are "distant". | |
2312 // | |
2313 // * We might want to factor a thread's priority in the spin policy. | |
2314 // Threads with a higher priority might spin for slightly longer. | |
2315 // Similarly, if we use back-off in the TATAS loop, lower priority | |
2316 // threads might back-off longer. We don't currently use a | |
2317 // thread's priority when placing it on the entry queue. We may | |
2318 // want to consider doing so in future releases. | |
2319 // | |
2320 // * We might transiently drop a thread's scheduling priority while it spins. | |
2321 // SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris | |
2322 // would suffice. We could even consider letting the thread spin indefinitely at | |
2323 // a depressed or "idle" priority. This brings up fairness issues, however -- | |
2324 // in a saturated system a thread would with a reduced priority could languish | |
2325 // for extended periods on the ready queue. | |
2326 // | |
2327 // * While spinning try to use the otherwise wasted time to help the VM make | |
2328 // progress: | |
2329 // | |
2330 // -- YieldTo() the owner, if the owner is OFFPROC but ready | |
2331 // Done our remaining quantum directly to the ready thread. | |
2332 // This helps "push" the lock owner through the critical section. | |
2333 // It also tends to improve affinity/locality as the lock | |
2334 // "migrates" less frequently between CPUs. | |
2335 // -- Walk our own stack in anticipation of blocking. Memoize the roots. | |
2336 // -- Perform strand checking for other thread. Unpark potential strandees. | |
2337 // -- Help GC: trace or mark -- this would need to be a bounded unit of work. | |
2338 // Unfortunately this will pollute our $ and TLBs. Recall that we | |
2339 // spin to avoid context switching -- context switching has an | |
2340 // immediate cost in latency, a disruptive cost to other strands on a CMT | |
2341 // processor, and an amortized cost because of the D$ and TLB cache | |
2342 // reload transient when the thread comes back ONPROC and repopulates | |
2343 // $s and TLBs. | |
2344 // -- call getloadavg() to see if the system is saturated. It'd probably | |
2345 // make sense to call getloadavg() half way through the spin. | |
2346 // If the system isn't at full capacity the we'd simply reset | |
2347 // the spin counter to and extend the spin attempt. | |
2348 // -- Doug points out that we should use the same "helping" policy | |
2349 // in thread.yield(). | |
2350 // | |
2351 // * Try MONITOR-MWAIT on systems that support those instructions. | |
2352 // | |
2353 // * The spin statistics that drive spin decisions & frequency are | |
2354 // maintained in the objectmonitor structure so if we deflate and reinflate | |
2355 // we lose spin state. In practice this is not usually a concern | |
2356 // as the default spin state after inflation is aggressive (optimistic) | |
2357 // and tends toward spinning. So in the worst case for a lock where | |
2358 // spinning is not profitable we may spin unnecessarily for a brief | |
2359 // period. But then again, if a lock is contended it'll tend not to deflate | |
2360 // in the first place. | |
2361 | |
2362 | |
2363 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; | |
2364 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; | |
2365 | |
2366 // Spinning: Fixed frequency (100%), vary duration | |
2367 | |
2368 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { | |
2369 | |
2370 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. | |
2371 int ctr = Knob_FixedSpin ; | |
2372 if (ctr != 0) { | |
2373 while (--ctr >= 0) { | |
2374 if (TryLock (Self) > 0) return 1 ; | |
2375 SpinPause () ; | |
2376 } | |
2377 return 0 ; | |
2378 } | |
2379 | |
2380 for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) { | |
2381 if (TryLock(Self) > 0) { | |
2382 // Increase _SpinDuration ... | |
2383 // Note that we don't clamp SpinDuration precisely at SpinLimit. | |
2384 // Raising _SpurDuration to the poverty line is key. | |
2385 int x = _SpinDuration ; | |
2386 if (x < Knob_SpinLimit) { | |
2387 if (x < Knob_Poverty) x = Knob_Poverty ; | |
2388 _SpinDuration = x + Knob_BonusB ; | |
2389 } | |
2390 return 1 ; | |
2391 } | |
2392 SpinPause () ; | |
2393 } | |
2394 | |
2395 // Admission control - verify preconditions for spinning | |
2396 // | |
2397 // We always spin a little bit, just to prevent _SpinDuration == 0 from | |
2398 // becoming an absorbing state. Put another way, we spin briefly to | |
2399 // sample, just in case the system load, parallelism, contention, or lock | |
2400 // modality changed. | |
2401 // | |
2402 // Consider the following alternative: | |
2403 // Periodically set _SpinDuration = _SpinLimit and try a long/full | |
2404 // spin attempt. "Periodically" might mean after a tally of | |
2405 // the # of failed spin attempts (or iterations) reaches some threshold. | |
2406 // This takes us into the realm of 1-out-of-N spinning, where we | |
2407 // hold the duration constant but vary the frequency. | |
2408 | |
2409 ctr = _SpinDuration ; | |
2410 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ; | |
2411 if (ctr <= 0) return 0 ; | |
2412 | |
2413 if (Knob_SuccRestrict && _succ != NULL) return 0 ; | |
2414 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) { | |
2415 TEVENT (Spin abort - notrunnable [TOP]); | |
2416 return 0 ; | |
2417 } | |
2418 | |
2419 int MaxSpin = Knob_MaxSpinners ; | |
2420 if (MaxSpin >= 0) { | |
2421 if (_Spinner > MaxSpin) { | |
2422 TEVENT (Spin abort -- too many spinners) ; | |
2423 return 0 ; | |
2424 } | |
2425 // Slighty racy, but benign ... | |
2426 Adjust (&_Spinner, 1) ; | |
2427 } | |
2428 | |
2429 // We're good to spin ... spin ingress. | |
2430 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades | |
2431 // when preparing to LD...CAS _owner, etc and the CAS is likely | |
2432 // to succeed. | |
2433 int hits = 0 ; | |
2434 int msk = 0 ; | |
2435 int caspty = Knob_CASPenalty ; | |
2436 int oxpty = Knob_OXPenalty ; | |
2437 int sss = Knob_SpinSetSucc ; | |
2438 if (sss && _succ == NULL ) _succ = Self ; | |
2439 Thread * prv = NULL ; | |
2440 | |
2441 // There are three ways to exit the following loop: | |
2442 // 1. A successful spin where this thread has acquired the lock. | |
2443 // 2. Spin failure with prejudice | |
2444 // 3. Spin failure without prejudice | |
2445 | |
2446 while (--ctr >= 0) { | |
2447 | |
2448 // Periodic polling -- Check for pending GC | |
2449 // Threads may spin while they're unsafe. | |
2450 // We don't want spinning threads to delay the JVM from reaching | |
2451 // a stop-the-world safepoint or to steal cycles from GC. | |
2452 // If we detect a pending safepoint we abort in order that | |
2453 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b) | |
2454 // this thread, if safe, doesn't steal cycles from GC. | |
2455 // This is in keeping with the "no loitering in runtime" rule. | |
2456 // We periodically check to see if there's a safepoint pending. | |
2457 if ((ctr & 0xFF) == 0) { | |
2458 if (SafepointSynchronize::do_call_back()) { | |
2459 TEVENT (Spin: safepoint) ; | |
2460 goto Abort ; // abrupt spin egress | |
2461 } | |
2462 if (Knob_UsePause & 1) SpinPause () ; | |
2463 | |
2464 int (*scb)(intptr_t,int) = SpinCallbackFunction ; | |
2465 if (hits > 50 && scb != NULL) { | |
2466 int abend = (*scb)(SpinCallbackArgument, 0) ; | |
2467 } | |
2468 } | |
2469 | |
2470 if (Knob_UsePause & 2) SpinPause() ; | |
2471 | |
2472 // Exponential back-off ... Stay off the bus to reduce coherency traffic. | |
2473 // This is useful on classic SMP systems, but is of less utility on | |
2474 // N1-style CMT platforms. | |
2475 // | |
2476 // Trade-off: lock acquisition latency vs coherency bandwidth. | |
2477 // Lock hold times are typically short. A histogram | |
2478 // of successful spin attempts shows that we usually acquire | |
2479 // the lock early in the spin. That suggests we want to | |
2480 // sample _owner frequently in the early phase of the spin, | |
2481 // but then back-off and sample less frequently as the spin | |
2482 // progresses. The back-off makes a good citizen on SMP big | |
2483 // SMP systems. Oversampling _owner can consume excessive | |
2484 // coherency bandwidth. Relatedly, if we _oversample _owner we | |
2485 // can inadvertently interfere with the the ST m->owner=null. | |
2486 // executed by the lock owner. | |
2487 if (ctr & msk) continue ; | |
2488 ++hits ; | |
2489 if ((hits & 0xF) == 0) { | |
2490 // The 0xF, above, corresponds to the exponent. | |
2491 // Consider: (msk+1)|msk | |
2492 msk = ((msk << 2)|3) & BackOffMask ; | |
2493 } | |
2494 | |
2495 // Probe _owner with TATAS | |
2496 // If this thread observes the monitor transition or flicker | |
2497 // from locked to unlocked to locked, then the odds that this | |
2498 // thread will acquire the lock in this spin attempt go down | |
2499 // considerably. The same argument applies if the CAS fails | |
2500 // or if we observe _owner change from one non-null value to | |
2501 // another non-null value. In such cases we might abort | |
2502 // the spin without prejudice or apply a "penalty" to the | |
2503 // spin count-down variable "ctr", reducing it by 100, say. | |
2504 | |
2505 Thread * ox = (Thread *) _owner ; | |
2506 if (ox == NULL) { | |
2507 ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; | |
2508 if (ox == NULL) { | |
2509 // The CAS succeeded -- this thread acquired ownership | |
2510 // Take care of some bookkeeping to exit spin state. | |
2511 if (sss && _succ == Self) { | |
2512 _succ = NULL ; | |
2513 } | |
2514 if (MaxSpin > 0) Adjust (&_Spinner, -1) ; | |
2515 | |
2516 // Increase _SpinDuration : | |
2517 // The spin was successful (profitable) so we tend toward | |
2518 // longer spin attempts in the future. | |
2519 // CONSIDER: factor "ctr" into the _SpinDuration adjustment. | |
2520 // If we acquired the lock early in the spin cycle it | |
2521 // makes sense to increase _SpinDuration proportionally. | |
2522 // Note that we don't clamp SpinDuration precisely at SpinLimit. | |
2523 int x = _SpinDuration ; | |
2524 if (x < Knob_SpinLimit) { | |
2525 if (x < Knob_Poverty) x = Knob_Poverty ; | |
2526 _SpinDuration = x + Knob_Bonus ; | |
2527 } | |
2528 return 1 ; | |
2529 } | |
2530 | |
2531 // The CAS failed ... we can take any of the following actions: | |
2532 // * penalize: ctr -= Knob_CASPenalty | |
2533 // * exit spin with prejudice -- goto Abort; | |
2534 // * exit spin without prejudice. | |
2535 // * Since CAS is high-latency, retry again immediately. | |
2536 prv = ox ; | |
2537 TEVENT (Spin: cas failed) ; | |
2538 if (caspty == -2) break ; | |
2539 if (caspty == -1) goto Abort ; | |
2540 ctr -= caspty ; | |
2541 continue ; | |
2542 } | |
2543 | |
2544 // Did lock ownership change hands ? | |
2545 if (ox != prv && prv != NULL ) { | |
2546 TEVENT (spin: Owner changed) | |
2547 if (oxpty == -2) break ; | |
2548 if (oxpty == -1) goto Abort ; | |
2549 ctr -= oxpty ; | |
2550 } | |
2551 prv = ox ; | |
2552 | |
2553 // Abort the spin if the owner is not executing. | |
2554 // The owner must be executing in order to drop the lock. | |
2555 // Spinning while the owner is OFFPROC is idiocy. | |
2556 // Consider: ctr -= RunnablePenalty ; | |
2557 if (Knob_OState && NotRunnable (Self, ox)) { | |
2558 TEVENT (Spin abort - notrunnable); | |
2559 goto Abort ; | |
2560 } | |
2561 if (sss && _succ == NULL ) _succ = Self ; | |
2562 } | |
2563 | |
2564 // Spin failed with prejudice -- reduce _SpinDuration. | |
2565 // TODO: Use an AIMD-like policy to adjust _SpinDuration. | |
2566 // AIMD is globally stable. | |
2567 TEVENT (Spin failure) ; | |
2568 { | |
2569 int x = _SpinDuration ; | |
2570 if (x > 0) { | |
2571 // Consider an AIMD scheme like: x -= (x >> 3) + 100 | |
2572 // This is globally sample and tends to damp the response. | |
2573 x -= Knob_Penalty ; | |
2574 if (x < 0) x = 0 ; | |
2575 _SpinDuration = x ; | |
2576 } | |
2577 } | |
2578 | |
2579 Abort: | |
2580 if (MaxSpin >= 0) Adjust (&_Spinner, -1) ; | |
2581 if (sss && _succ == Self) { | |
2582 _succ = NULL ; | |
2583 // Invariant: after setting succ=null a contending thread | |
2584 // must recheck-retry _owner before parking. This usually happens | |
2585 // in the normal usage of TrySpin(), but it's safest | |
2586 // to make TrySpin() as foolproof as possible. | |
2587 OrderAccess::fence() ; | |
2588 if (TryLock(Self) > 0) return 1 ; | |
2589 } | |
2590 return 0 ; | |
2591 } | |
2592 | |
2593 #define TrySpin TrySpin_VaryDuration | |
2594 | |
2595 static void DeferredInitialize () { | |
2596 if (InitDone > 0) return ; | |
2597 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) { | |
2598 while (InitDone != 1) ; | |
2599 return ; | |
2600 } | |
2601 | |
2602 // One-shot global initialization ... | |
2603 // The initialization is idempotent, so we don't need locks. | |
2604 // In the future consider doing this via os::init_2(). | |
2605 // SyncKnobs consist of <Key>=<Value> pairs in the style | |
2606 // of environment variables. Start by converting ':' to NUL. | |
2607 | |
2608 if (SyncKnobs == NULL) SyncKnobs = "" ; | |
2609 | |
2610 size_t sz = strlen (SyncKnobs) ; | |
2611 char * knobs = (char *) malloc (sz + 2) ; | |
2612 if (knobs == NULL) { | |
2613 vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ; | |
2614 guarantee (0, "invariant") ; | |
2615 } | |
2616 strcpy (knobs, SyncKnobs) ; | |
2617 knobs[sz+1] = 0 ; | |
2618 for (char * p = knobs ; *p ; p++) { | |
2619 if (*p == ':') *p = 0 ; | |
2620 } | |
2621 | |
2622 #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); } | |
2623 SETKNOB(ReportSettings) ; | |
2624 SETKNOB(Verbose) ; | |
2625 SETKNOB(FixedSpin) ; | |
2626 SETKNOB(SpinLimit) ; | |
2627 SETKNOB(SpinBase) ; | |
2628 SETKNOB(SpinBackOff); | |
2629 SETKNOB(CASPenalty) ; | |
2630 SETKNOB(OXPenalty) ; | |
2631 SETKNOB(LogSpins) ; | |
2632 SETKNOB(SpinSetSucc) ; | |
2633 SETKNOB(SuccEnabled) ; | |
2634 SETKNOB(SuccRestrict) ; | |
2635 SETKNOB(Penalty) ; | |
2636 SETKNOB(Bonus) ; | |
2637 SETKNOB(BonusB) ; | |
2638 SETKNOB(Poverty) ; | |
2639 SETKNOB(SpinAfterFutile) ; | |
2640 SETKNOB(UsePause) ; | |
2641 SETKNOB(SpinEarly) ; | |
2642 SETKNOB(OState) ; | |
2643 SETKNOB(MaxSpinners) ; | |
2644 SETKNOB(PreSpin) ; | |
2645 SETKNOB(ExitPolicy) ; | |
2646 SETKNOB(QMode); | |
2647 SETKNOB(ResetEvent) ; | |
2648 SETKNOB(MoveNotifyee) ; | |
2649 SETKNOB(FastHSSEC) ; | |
2650 #undef SETKNOB | |
2651 | |
2652 if (os::is_MP()) { | |
2653 BackOffMask = (1 << Knob_SpinBackOff) - 1 ; | |
2654 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; | |
2655 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1) | |
2656 } else { | |
2657 Knob_SpinLimit = 0 ; | |
2658 Knob_SpinBase = 0 ; | |
2659 Knob_PreSpin = 0 ; | |
2660 Knob_FixedSpin = -1 ; | |
2661 } | |
2662 | |
2663 if (Knob_LogSpins == 0) { | |
2664 ObjectSynchronizer::_sync_FailedSpins = NULL ; | |
2665 } | |
2666 | |
2667 free (knobs) ; | |
2668 OrderAccess::fence() ; | |
2669 InitDone = 1 ; | |
2670 } | |
2671 | |
2672 // Theory of operations -- Monitors lists, thread residency, etc: | |
2673 // | |
2674 // * A thread acquires ownership of a monitor by successfully | |
2675 // CAS()ing the _owner field from null to non-null. | |
2676 // | |
2677 // * Invariant: A thread appears on at most one monitor list -- | |
2678 // cxq, EntryList or WaitSet -- at any one time. | |
2679 // | |
2680 // * Contending threads "push" themselves onto the cxq with CAS | |
2681 // and then spin/park. | |
2682 // | |
2683 // * After a contending thread eventually acquires the lock it must | |
2684 // dequeue itself from either the EntryList or the cxq. | |
2685 // | |
2686 // * The exiting thread identifies and unparks an "heir presumptive" | |
2687 // tentative successor thread on the EntryList. Critically, the | |
2688 // exiting thread doesn't unlink the successor thread from the EntryList. | |
2689 // After having been unparked, the wakee will recontend for ownership of | |
2690 // the monitor. The successor (wakee) will either acquire the lock or | |
2691 // re-park itself. | |
2692 // | |
2693 // Succession is provided for by a policy of competitive handoff. | |
2694 // The exiting thread does _not_ grant or pass ownership to the | |
2695 // successor thread. (This is also referred to as "handoff" succession"). | |
2696 // Instead the exiting thread releases ownership and possibly wakes | |
2697 // a successor, so the successor can (re)compete for ownership of the lock. | |
2698 // If the EntryList is empty but the cxq is populated the exiting | |
2699 // thread will drain the cxq into the EntryList. It does so by | |
2700 // by detaching the cxq (installing null with CAS) and folding | |
2701 // the threads from the cxq into the EntryList. The EntryList is | |
2702 // doubly linked, while the cxq is singly linked because of the | |
2703 // CAS-based "push" used to enqueue recently arrived threads (RATs). | |
2704 // | |
2705 // * Concurrency invariants: | |
2706 // | |
2707 // -- only the monitor owner may access or mutate the EntryList. | |
2708 // The mutex property of the monitor itself protects the EntryList | |
2709 // from concurrent interference. | |
2710 // -- Only the monitor owner may detach the cxq. | |
2711 // | |
2712 // * The monitor entry list operations avoid locks, but strictly speaking | |
2713 // they're not lock-free. Enter is lock-free, exit is not. | |
2714 // See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html | |
2715 // | |
2716 // * The cxq can have multiple concurrent "pushers" but only one concurrent | |
2717 // detaching thread. This mechanism is immune from the ABA corruption. | |
2718 // More precisely, the CAS-based "push" onto cxq is ABA-oblivious. | |
2719 // | |
2720 // * Taken together, the cxq and the EntryList constitute or form a | |
2721 // single logical queue of threads stalled trying to acquire the lock. | |
2722 // We use two distinct lists to improve the odds of a constant-time | |
2723 // dequeue operation after acquisition (in the ::enter() epilog) and | |
2724 // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm). | |
2725 // A key desideratum is to minimize queue & monitor metadata manipulation | |
2726 // that occurs while holding the monitor lock -- that is, we want to | |
2727 // minimize monitor lock holds times. Note that even a small amount of | |
2728 // fixed spinning will greatly reduce the # of enqueue-dequeue operations | |
2729 // on EntryList|cxq. That is, spinning relieves contention on the "inner" | |
2730 // locks and monitor metadata. | |
2731 // | |
2732 // Cxq points to the the set of Recently Arrived Threads attempting entry. | |
2733 // Because we push threads onto _cxq with CAS, the RATs must take the form of | |
2734 // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when | |
2735 // the unlocking thread notices that EntryList is null but _cxq is != null. | |
2736 // | |
2737 // The EntryList is ordered by the prevailing queue discipline and | |
2738 // can be organized in any convenient fashion, such as a doubly-linked list or | |
2739 // a circular doubly-linked list. Critically, we want insert and delete operations | |
2740 // to operate in constant-time. If we need a priority queue then something akin | |
2741 // to Solaris' sleepq would work nicely. Viz., | |
2742 // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c. | |
2743 // Queue discipline is enforced at ::exit() time, when the unlocking thread | |
2744 // drains the cxq into the EntryList, and orders or reorders the threads on the | |
2745 // EntryList accordingly. | |
2746 // | |
2747 // Barring "lock barging", this mechanism provides fair cyclic ordering, | |
2748 // somewhat similar to an elevator-scan. | |
2749 // | |
2750 // * The monitor synchronization subsystem avoids the use of native | |
2751 // synchronization primitives except for the narrow platform-specific | |
2752 // park-unpark abstraction. See the comments in os_solaris.cpp regarding | |
2753 // the semantics of park-unpark. Put another way, this monitor implementation | |
2754 // depends only on atomic operations and park-unpark. The monitor subsystem | |
2755 // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the | |
2756 // underlying OS manages the READY<->RUN transitions. | |
2757 // | |
2758 // * Waiting threads reside on the WaitSet list -- wait() puts | |
2759 // the caller onto the WaitSet. | |
2760 // | |
2761 // * notify() or notifyAll() simply transfers threads from the WaitSet to | |
2762 // either the EntryList or cxq. Subsequent exit() operations will | |
2763 // unpark the notifyee. Unparking a notifee in notify() is inefficient - | |
2764 // it's likely the notifyee would simply impale itself on the lock held | |
2765 // by the notifier. | |
2766 // | |
2767 // * An interesting alternative is to encode cxq as (List,LockByte) where | |
2768 // the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary | |
2769 // variable, like _recursions, in the scheme. The threads or Events that form | |
2770 // the list would have to be aligned in 256-byte addresses. A thread would | |
2771 // try to acquire the lock or enqueue itself with CAS, but exiting threads | |
2772 // could use a 1-0 protocol and simply STB to set the LockByte to 0. | |
2773 // Note that is is *not* word-tearing, but it does presume that full-word | |
2774 // CAS operations are coherent with intermix with STB operations. That's true | |
2775 // on most common processors. | |
2776 // | |
2777 // * See also http://blogs.sun.com/dave | |
2778 | |
2779 | |
2780 void ATTR ObjectMonitor::EnterI (TRAPS) { | |
2781 Thread * Self = THREAD ; | |
2782 assert (Self->is_Java_thread(), "invariant") ; | |
2783 assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ; | |
2784 | |
2785 // Try the lock - TATAS | |
2786 if (TryLock (Self) > 0) { | |
2787 assert (_succ != Self , "invariant") ; | |
2788 assert (_owner == Self , "invariant") ; | |
2789 assert (_Responsible != Self , "invariant") ; | |
2790 return ; | |
2791 } | |
2792 | |
2793 DeferredInitialize () ; | |
2794 | |
2795 // We try one round of spinning *before* enqueueing Self. | |
2796 // | |
2797 // If the _owner is ready but OFFPROC we could use a YieldTo() | |
2798 // operation to donate the remainder of this thread's quantum | |
2799 // to the owner. This has subtle but beneficial affinity | |
2800 // effects. | |
2801 | |
2802 if (TrySpin (Self) > 0) { | |
2803 assert (_owner == Self , "invariant") ; | |
2804 assert (_succ != Self , "invariant") ; | |
2805 assert (_Responsible != Self , "invariant") ; | |
2806 return ; | |
2807 } | |
2808 | |
2809 // The Spin failed -- Enqueue and park the thread ... | |
2810 assert (_succ != Self , "invariant") ; | |
2811 assert (_owner != Self , "invariant") ; | |
2812 assert (_Responsible != Self , "invariant") ; | |
2813 | |
2814 // Enqueue "Self" on ObjectMonitor's _cxq. | |
2815 // | |
2816 // Node acts as a proxy for Self. | |
2817 // As an aside, if were to ever rewrite the synchronization code mostly | |
2818 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class | |
2819 // Java objects. This would avoid awkward lifecycle and liveness issues, | |
2820 // as well as eliminate a subset of ABA issues. | |
2821 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events. | |
2822 // | |
2823 | |
2824 ObjectWaiter node(Self) ; | |
2825 Self->_ParkEvent->reset() ; | |
2826 node._prev = (ObjectWaiter *) 0xBAD ; | |
2827 node.TState = ObjectWaiter::TS_CXQ ; | |
2828 | |
2829 // Push "Self" onto the front of the _cxq. | |
2830 // Once on cxq/EntryList, Self stays on-queue until it acquires the lock. | |
2831 // Note that spinning tends to reduce the rate at which threads | |
2832 // enqueue and dequeue on EntryList|cxq. | |
2833 ObjectWaiter * nxt ; | |
2834 for (;;) { | |
2835 node._next = nxt = _cxq ; | |
2836 if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; | |
2837 | |
2838 // Interference - the CAS failed because _cxq changed. Just retry. | |
2839 // As an optional optimization we retry the lock. | |
2840 if (TryLock (Self) > 0) { | |
2841 assert (_succ != Self , "invariant") ; | |
2842 assert (_owner == Self , "invariant") ; | |
2843 assert (_Responsible != Self , "invariant") ; | |
2844 return ; | |
2845 } | |
2846 } | |
2847 | |
2848 // Check for cxq|EntryList edge transition to non-null. This indicates | |
2849 // the onset of contention. While contention persists exiting threads | |
2850 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit | |
2851 // operations revert to the faster 1-0 mode. This enter operation may interleave | |
2852 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we | |
2853 // arrange for one of the contending thread to use a timed park() operations | |
2854 // to detect and recover from the race. (Stranding is form of progress failure | |
2855 // where the monitor is unlocked but all the contending threads remain parked). | |
2856 // That is, at least one of the contended threads will periodically poll _owner. | |
2857 // One of the contending threads will become the designated "Responsible" thread. | |
2858 // The Responsible thread uses a timed park instead of a normal indefinite park | |
2859 // operation -- it periodically wakes and checks for and recovers from potential | |
2860 // strandings admitted by 1-0 exit operations. We need at most one Responsible | |
2861 // thread per-monitor at any given moment. Only threads on cxq|EntryList may | |
2862 // be responsible for a monitor. | |
2863 // | |
2864 // Currently, one of the contended threads takes on the added role of "Responsible". | |
2865 // A viable alternative would be to use a dedicated "stranding checker" thread | |
2866 // that periodically iterated over all the threads (or active monitors) and unparked | |
2867 // successors where there was risk of stranding. This would help eliminate the | |
2868 // timer scalability issues we see on some platforms as we'd only have one thread | |
2869 // -- the checker -- parked on a timer. | |
2870 | |
2871 if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { | |
2872 // Try to assume the role of responsible thread for the monitor. | |
2873 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } | |
2874 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; | |
2875 } | |
2876 | |
2877 // The lock have been released while this thread was occupied queueing | |
2878 // itself onto _cxq. To close the race and avoid "stranding" and | |
2879 // progress-liveness failure we must resample-retry _owner before parking. | |
2880 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner. | |
2881 // In this case the ST-MEMBAR is accomplished with CAS(). | |
2882 // | |
2883 // TODO: Defer all thread state transitions until park-time. | |
2884 // Since state transitions are heavy and inefficient we'd like | |
2885 // to defer the state transitions until absolutely necessary, | |
2886 // and in doing so avoid some transitions ... | |
2887 | |
2888 TEVENT (Inflated enter - Contention) ; | |
2889 int nWakeups = 0 ; | |
2890 int RecheckInterval = 1 ; | |
2891 | |
2892 for (;;) { | |
2893 | |
2894 if (TryLock (Self) > 0) break ; | |
2895 assert (_owner != Self, "invariant") ; | |
2896 | |
2897 if ((SyncFlags & 2) && _Responsible == NULL) { | |
2898 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; | |
2899 } | |
2900 | |
2901 // park self | |
2902 if (_Responsible == Self || (SyncFlags & 1)) { | |
2903 TEVENT (Inflated enter - park TIMED) ; | |
2904 Self->_ParkEvent->park ((jlong) RecheckInterval) ; | |
2905 // Increase the RecheckInterval, but clamp the value. | |
2906 RecheckInterval *= 8 ; | |
2907 if (RecheckInterval > 1000) RecheckInterval = 1000 ; | |
2908 } else { | |
2909 TEVENT (Inflated enter - park UNTIMED) ; | |
2910 Self->_ParkEvent->park() ; | |
2911 } | |
2912 | |
2913 if (TryLock(Self) > 0) break ; | |
2914 | |
2915 // The lock is still contested. | |
2916 // Keep a tally of the # of futile wakeups. | |
2917 // Note that the counter is not protected by a lock or updated by atomics. | |
2918 // That is by design - we trade "lossy" counters which are exposed to | |
2919 // races during updates for a lower probe effect. | |
2920 TEVENT (Inflated enter - Futile wakeup) ; | |
2921 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) { | |
2922 ObjectSynchronizer::_sync_FutileWakeups->inc() ; | |
2923 } | |
2924 ++ nWakeups ; | |
2925 | |
2926 // Assuming this is not a spurious wakeup we'll normally find _succ == Self. | |
2927 // We can defer clearing _succ until after the spin completes | |
2928 // TrySpin() must tolerate being called with _succ == Self. | |
2929 // Try yet another round of adaptive spinning. | |
2930 if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ; | |
2931 | |
2932 // We can find that we were unpark()ed and redesignated _succ while | |
2933 // we were spinning. That's harmless. If we iterate and call park(), | |
2934 // park() will consume the event and return immediately and we'll | |
2935 // just spin again. This pattern can repeat, leaving _succ to simply | |
2936 // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks(). | |
2937 // Alternately, we can sample fired() here, and if set, forgo spinning | |
2938 // in the next iteration. | |
2939 | |
2940 if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) { | |
2941 Self->_ParkEvent->reset() ; | |
2942 OrderAccess::fence() ; | |
2943 } | |
2944 if (_succ == Self) _succ = NULL ; | |
2945 | |
2946 // Invariant: after clearing _succ a thread *must* retry _owner before parking. | |
2947 OrderAccess::fence() ; | |
2948 } | |
2949 | |
2950 // Egress : | |
2951 // Self has acquired the lock -- Unlink Self from the cxq or EntryList. | |
2952 // Normally we'll find Self on the EntryList . | |
2953 // From the perspective of the lock owner (this thread), the | |
2954 // EntryList is stable and cxq is prepend-only. | |
2955 // The head of cxq is volatile but the interior is stable. | |
2956 // In addition, Self.TState is stable. | |
2957 | |
2958 assert (_owner == Self , "invariant") ; | |
2959 assert (object() != NULL , "invariant") ; | |
2960 // I'd like to write: | |
2961 // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; | |
2962 // but as we're at a safepoint that's not safe. | |
2963 | |
2964 UnlinkAfterAcquire (Self, &node) ; | |
2965 if (_succ == Self) _succ = NULL ; | |
2966 | |
2967 assert (_succ != Self, "invariant") ; | |
2968 if (_Responsible == Self) { | |
2969 _Responsible = NULL ; | |
2970 // Dekker pivot-point. | |
2971 // Consider OrderAccess::storeload() here | |
2972 | |
2973 // We may leave threads on cxq|EntryList without a designated | |
2974 // "Responsible" thread. This is benign. When this thread subsequently | |
2975 // exits the monitor it can "see" such preexisting "old" threads -- | |
2976 // threads that arrived on the cxq|EntryList before the fence, above -- | |
2977 // by LDing cxq|EntryList. Newly arrived threads -- that is, threads | |
2978 // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible | |
2979 // non-null and elect a new "Responsible" timer thread. | |
2980 // | |
2981 // This thread executes: | |
2982 // ST Responsible=null; MEMBAR (in enter epilog - here) | |
2983 // LD cxq|EntryList (in subsequent exit) | |
2984 // | |
2985 // Entering threads in the slow/contended path execute: | |
2986 // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog) | |
2987 // The (ST cxq; MEMBAR) is accomplished with CAS(). | |
2988 // | |
2989 // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent | |
2990 // exit operation from floating above the ST Responsible=null. | |
2991 // | |
2992 // In *practice* however, EnterI() is always followed by some atomic | |
2993 // operation such as the decrement of _count in ::enter(). Those atomics | |
2994 // obviate the need for the explicit MEMBAR, above. | |
2995 } | |
2996 | |
2997 // We've acquired ownership with CAS(). | |
2998 // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics. | |
2999 // But since the CAS() this thread may have also stored into _succ, | |
3000 // EntryList, cxq or Responsible. These meta-data updates must be | |
3001 // visible __before this thread subsequently drops the lock. | |
3002 // Consider what could occur if we didn't enforce this constraint -- | |
3003 // STs to monitor meta-data and user-data could reorder with (become | |
3004 // visible after) the ST in exit that drops ownership of the lock. | |
3005 // Some other thread could then acquire the lock, but observe inconsistent | |
3006 // or old monitor meta-data and heap data. That violates the JMM. | |
3007 // To that end, the 1-0 exit() operation must have at least STST|LDST | |
3008 // "release" barrier semantics. Specifically, there must be at least a | |
3009 // STST|LDST barrier in exit() before the ST of null into _owner that drops | |
3010 // the lock. The barrier ensures that changes to monitor meta-data and data | |
3011 // protected by the lock will be visible before we release the lock, and | |
3012 // therefore before some other thread (CPU) has a chance to acquire the lock. | |
3013 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html. | |
3014 // | |
3015 // Critically, any prior STs to _succ or EntryList must be visible before | |
3016 // the ST of null into _owner in the *subsequent* (following) corresponding | |
3017 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily | |
3018 // execute a serializing instruction. | |
3019 | |
3020 if (SyncFlags & 8) { | |
3021 OrderAccess::fence() ; | |
3022 } | |
3023 return ; | |
3024 } | |
3025 | |
3026 // ExitSuspendEquivalent: | |
3027 // A faster alternate to handle_special_suspend_equivalent_condition() | |
3028 // | |
3029 // handle_special_suspend_equivalent_condition() unconditionally | |
3030 // acquires the SR_lock. On some platforms uncontended MutexLocker() | |
3031 // operations have high latency. Note that in ::enter() we call HSSEC | |
3032 // while holding the monitor, so we effectively lengthen the critical sections. | |
3033 // | |
3034 // There are a number of possible solutions: | |
3035 // | |
3036 // A. To ameliorate the problem we might also defer state transitions | |
3037 // to as late as possible -- just prior to parking. | |
3038 // Given that, we'd call HSSEC after having returned from park(), | |
3039 // but before attempting to acquire the monitor. This is only a | |
3040 // partial solution. It avoids calling HSSEC while holding the | |
3041 // monitor (good), but it still increases successor reacquisition latency -- | |
3042 // the interval between unparking a successor and the time the successor | |
3043 // resumes and retries the lock. See ReenterI(), which defers state transitions. | |
3044 // If we use this technique we can also avoid EnterI()-exit() loop | |
3045 // in ::enter() where we iteratively drop the lock and then attempt | |
3046 // to reacquire it after suspending. | |
3047 // | |
3048 // B. In the future we might fold all the suspend bits into a | |
3049 // composite per-thread suspend flag and then update it with CAS(). | |
3050 // Alternately, a Dekker-like mechanism with multiple variables | |
3051 // would suffice: | |
3052 // ST Self->_suspend_equivalent = false | |
3053 // MEMBAR | |
3054 // LD Self_>_suspend_flags | |
3055 // | |
3056 | |
3057 | |
3058 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) { | |
3059 int Mode = Knob_FastHSSEC ; | |
3060 if (Mode && !jSelf->is_external_suspend()) { | |
3061 assert (jSelf->is_suspend_equivalent(), "invariant") ; | |
3062 jSelf->clear_suspend_equivalent() ; | |
3063 if (2 == Mode) OrderAccess::storeload() ; | |
3064 if (!jSelf->is_external_suspend()) return false ; | |
3065 // We raced a suspension -- fall thru into the slow path | |
3066 TEVENT (ExitSuspendEquivalent - raced) ; | |
3067 jSelf->set_suspend_equivalent() ; | |
3068 } | |
3069 return jSelf->handle_special_suspend_equivalent_condition() ; | |
3070 } | |
3071 | |
3072 | |
3073 // ReenterI() is a specialized inline form of the latter half of the | |
3074 // contended slow-path from EnterI(). We use ReenterI() only for | |
3075 // monitor reentry in wait(). | |
3076 // | |
3077 // In the future we should reconcile EnterI() and ReenterI(), adding | |
3078 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the | |
3079 // loop accordingly. | |
3080 | |
3081 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) { | |
3082 assert (Self != NULL , "invariant") ; | |
3083 assert (SelfNode != NULL , "invariant") ; | |
3084 assert (SelfNode->_thread == Self , "invariant") ; | |
3085 assert (_waiters > 0 , "invariant") ; | |
3086 assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ; | |
3087 assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; | |
3088 JavaThread * jt = (JavaThread *) Self ; | |
3089 | |
3090 int nWakeups = 0 ; | |
3091 for (;;) { | |
3092 ObjectWaiter::TStates v = SelfNode->TState ; | |
3093 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; | |
3094 assert (_owner != Self, "invariant") ; | |
3095 | |
3096 if (TryLock (Self) > 0) break ; | |
3097 if (TrySpin (Self) > 0) break ; | |
3098 | |
3099 TEVENT (Wait Reentry - parking) ; | |
3100 | |
3101 // State transition wrappers around park() ... | |
3102 // ReenterI() wisely defers state transitions until | |
3103 // it's clear we must park the thread. | |
3104 { | |
3105 OSThreadContendState osts(Self->osthread()); | |
3106 ThreadBlockInVM tbivm(jt); | |
3107 | |
3108 // cleared by handle_special_suspend_equivalent_condition() | |
3109 // or java_suspend_self() | |
3110 jt->set_suspend_equivalent(); | |
3111 if (SyncFlags & 1) { | |
3112 Self->_ParkEvent->park ((jlong)1000) ; | |
3113 } else { | |
3114 Self->_ParkEvent->park () ; | |
3115 } | |
3116 | |
3117 // were we externally suspended while we were waiting? | |
3118 for (;;) { | |
3119 if (!ExitSuspendEquivalent (jt)) break ; | |
3120 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); } | |
3121 jt->java_suspend_self(); | |
3122 jt->set_suspend_equivalent(); | |
3123 } | |
3124 } | |
3125 | |
3126 // Try again, but just so we distinguish between futile wakeups and | |
3127 // successful wakeups. The following test isn't algorithmically | |
3128 // necessary, but it helps us maintain sensible statistics. | |
3129 if (TryLock(Self) > 0) break ; | |
3130 | |
3131 // The lock is still contested. | |
3132 // Keep a tally of the # of futile wakeups. | |
3133 // Note that the counter is not protected by a lock or updated by atomics. | |
3134 // That is by design - we trade "lossy" counters which are exposed to | |
3135 // races during updates for a lower probe effect. | |
3136 TEVENT (Wait Reentry - futile wakeup) ; | |
3137 ++ nWakeups ; | |
3138 | |
3139 // Assuming this is not a spurious wakeup we'll normally | |
3140 // find that _succ == Self. | |
3141 if (_succ == Self) _succ = NULL ; | |
3142 | |
3143 // Invariant: after clearing _succ a contending thread | |
3144 // *must* retry _owner before parking. | |
3145 OrderAccess::fence() ; | |
3146 | |
3147 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) { | |
3148 ObjectSynchronizer::_sync_FutileWakeups->inc() ; | |
3149 } | |
3150 } | |
3151 | |
3152 // Self has acquired the lock -- Unlink Self from the cxq or EntryList . | |
3153 // Normally we'll find Self on the EntryList. | |
3154 // Unlinking from the EntryList is constant-time and atomic-free. | |
3155 // From the perspective of the lock owner (this thread), the | |
3156 // EntryList is stable and cxq is prepend-only. | |
3157 // The head of cxq is volatile but the interior is stable. | |
3158 // In addition, Self.TState is stable. | |
3159 | |
3160 assert (_owner == Self, "invariant") ; | |
3161 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; | |
3162 UnlinkAfterAcquire (Self, SelfNode) ; | |
3163 if (_succ == Self) _succ = NULL ; | |
3164 assert (_succ != Self, "invariant") ; | |
3165 SelfNode->TState = ObjectWaiter::TS_RUN ; | |
3166 OrderAccess::fence() ; // see comments at the end of EnterI() | |
3167 } | |
3168 | |
3169 bool ObjectMonitor::try_enter(Thread* THREAD) { | |
3170 if (THREAD != _owner) { | |
3171 if (THREAD->is_lock_owned ((address)_owner)) { | |
3172 assert(_recursions == 0, "internal state error"); | |
3173 _owner = THREAD ; | |
3174 _recursions = 1 ; | |
3175 OwnerIsThread = 1 ; | |
3176 return true; | |
3177 } | |
3178 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { | |
3179 return false; | |
3180 } | |
3181 return true; | |
3182 } else { | |
3183 _recursions++; | |
3184 return true; | |
3185 } | |
3186 } | |
3187 | |
3188 void ATTR ObjectMonitor::enter(TRAPS) { | |
3189 // The following code is ordered to check the most common cases first | |
3190 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. | |
3191 Thread * const Self = THREAD ; | |
3192 void * cur ; | |
3193 | |
3194 cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; | |
3195 if (cur == NULL) { | |
3196 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. | |
3197 assert (_recursions == 0 , "invariant") ; | |
3198 assert (_owner == Self, "invariant") ; | |
3199 // CONSIDER: set or assert OwnerIsThread == 1 | |
3200 return ; | |
3201 } | |
3202 | |
3203 if (cur == Self) { | |
3204 // TODO-FIXME: check for integer overflow! BUGID 6557169. | |
3205 _recursions ++ ; | |
3206 return ; | |
3207 } | |
3208 | |
3209 if (Self->is_lock_owned ((address)cur)) { | |
3210 assert (_recursions == 0, "internal state error"); | |
3211 _recursions = 1 ; | |
3212 // Commute owner from a thread-specific on-stack BasicLockObject address to | |
3213 // a full-fledged "Thread *". | |
3214 _owner = Self ; | |
3215 OwnerIsThread = 1 ; | |
3216 return ; | |
3217 } | |
3218 | |
3219 // We've encountered genuine contention. | |
3220 assert (Self->_Stalled == 0, "invariant") ; | |
3221 Self->_Stalled = intptr_t(this) ; | |
3222 | |
3223 // Try one round of spinning *before* enqueueing Self | |
3224 // and before going through the awkward and expensive state | |
3225 // transitions. The following spin is strictly optional ... | |
3226 // Note that if we acquire the monitor from an initial spin | |
3227 // we forgo posting JVMTI events and firing DTRACE probes. | |
3228 if (Knob_SpinEarly && TrySpin (Self) > 0) { | |
3229 assert (_owner == Self , "invariant") ; | |
3230 assert (_recursions == 0 , "invariant") ; | |
3231 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; | |
3232 Self->_Stalled = 0 ; | |
3233 return ; | |
3234 } | |
3235 | |
3236 assert (_owner != Self , "invariant") ; | |
3237 assert (_succ != Self , "invariant") ; | |
3238 assert (Self->is_Java_thread() , "invariant") ; | |
3239 JavaThread * jt = (JavaThread *) Self ; | |
3240 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; | |
3241 assert (jt->thread_state() != _thread_blocked , "invariant") ; | |
3242 assert (this->object() != NULL , "invariant") ; | |
3243 assert (_count >= 0, "invariant") ; | |
3244 | |
3245 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). | |
3246 // Ensure the object-monitor relationship remains stable while there's contention. | |
3247 Atomic::inc_ptr(&_count); | |
3248 | |
3249 { // Change java thread status to indicate blocked on monitor enter. | |
3250 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); | |
3251 | |
3252 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt); | |
3253 if (JvmtiExport::should_post_monitor_contended_enter()) { | |
3254 JvmtiExport::post_monitor_contended_enter(jt, this); | |
3255 } | |
3256 | |
3257 OSThreadContendState osts(Self->osthread()); | |
3258 ThreadBlockInVM tbivm(jt); | |
3259 | |
3260 Self->set_current_pending_monitor(this); | |
3261 | |
3262 // TODO-FIXME: change the following for(;;) loop to straight-line code. | |
3263 for (;;) { | |
3264 jt->set_suspend_equivalent(); | |
3265 // cleared by handle_special_suspend_equivalent_condition() | |
3266 // or java_suspend_self() | |
3267 | |
3268 EnterI (THREAD) ; | |
3269 | |
3270 if (!ExitSuspendEquivalent(jt)) break ; | |
3271 | |
3272 // | |
3273 // We have acquired the contended monitor, but while we were | |
3274 // waiting another thread suspended us. We don't want to enter | |
3275 // the monitor while suspended because that would surprise the | |
3276 // thread that suspended us. | |
3277 // | |
3278 _recursions = 0 ; | |
3279 _succ = NULL ; | |
3280 exit (Self) ; | |
3281 | |
3282 jt->java_suspend_self(); | |
3283 } | |
3284 Self->set_current_pending_monitor(NULL); | |
3285 } | |
3286 | |
3287 Atomic::dec_ptr(&_count); | |
3288 assert (_count >= 0, "invariant") ; | |
3289 Self->_Stalled = 0 ; | |
3290 | |
3291 // Must either set _recursions = 0 or ASSERT _recursions == 0. | |
3292 assert (_recursions == 0 , "invariant") ; | |
3293 assert (_owner == Self , "invariant") ; | |
3294 assert (_succ != Self , "invariant") ; | |
3295 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; | |
3296 | |
3297 // The thread -- now the owner -- is back in vm mode. | |
3298 // Report the glorious news via TI,DTrace and jvmstat. | |
3299 // The probe effect is non-trivial. All the reportage occurs | |
3300 // while we hold the monitor, increasing the length of the critical | |
3301 // section. Amdahl's parallel speedup law comes vividly into play. | |
3302 // | |
3303 // Another option might be to aggregate the events (thread local or | |
3304 // per-monitor aggregation) and defer reporting until a more opportune | |
3305 // time -- such as next time some thread encounters contention but has | |
3306 // yet to acquire the lock. While spinning that thread could | |
3307 // spinning we could increment JVMStat counters, etc. | |
3308 | |
3309 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt); | |
3310 if (JvmtiExport::should_post_monitor_contended_entered()) { | |
3311 JvmtiExport::post_monitor_contended_entered(jt, this); | |
3312 } | |
3313 if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) { | |
3314 ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ; | |
3315 } | |
3316 } | |
3317 | |
3318 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) { | |
3319 assert (_owner == Self, "invariant") ; | |
3320 | |
3321 // Exit protocol: | |
3322 // 1. ST _succ = wakee | |
3323 // 2. membar #loadstore|#storestore; | |
3324 // 2. ST _owner = NULL | |
3325 // 3. unpark(wakee) | |
3326 | |
3327 _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ; | |
3328 ParkEvent * Trigger = Wakee->_event ; | |
3329 | |
3330 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again. | |
3331 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be | |
3332 // out-of-scope (non-extant). | |
3333 Wakee = NULL ; | |
3334 | |
3335 // Drop the lock | |
3336 OrderAccess::release_store_ptr (&_owner, NULL) ; | |
3337 OrderAccess::fence() ; // ST _owner vs LD in unpark() | |
3338 | |
3339 // TODO-FIXME: | |
3340 // If there's a safepoint pending the best policy would be to | |
3341 // get _this thread to a safepoint and only wake the successor | |
3342 // after the safepoint completed. monitorexit uses a "leaf" | |
3343 // state transition, however, so this thread can't become | |
3344 // safe at this point in time. (Its stack isn't walkable). | |
3345 // The next best thing is to defer waking the successor by | |
3346 // adding to a list of thread to be unparked after at the | |
3347 // end of the forthcoming STW). | |
3348 if (SafepointSynchronize::do_call_back()) { | |
3349 TEVENT (unpark before SAFEPOINT) ; | |
3350 } | |
3351 | |
3352 // Possible optimizations ... | |
3353 // | |
3354 // * Consider: set Wakee->UnparkTime = timeNow() | |
3355 // When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()). | |
3356 // By measuring recent ONPROC latency we can approximate the | |
3357 // system load. In turn, we can feed that information back | |
3358 // into the spinning & succession policies. | |
3359 // (ONPROC latency correlates strongly with load). | |
3360 // | |
3361 // * Pull affinity: | |
3362 // If the wakee is cold then transiently setting it's affinity | |
3363 // to the current CPU is a good idea. | |
3364 // See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt | |
478
a7fac4381b50
6639341: sometimes contended-exit event comes after contended-entered on another thread
blacklion
parents:
0
diff
changeset
|
3365 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); |
0 | 3366 Trigger->unpark() ; |
3367 | |
3368 // Maintain stats and report events to JVMTI | |
3369 if (ObjectSynchronizer::_sync_Parks != NULL) { | |
3370 ObjectSynchronizer::_sync_Parks->inc() ; | |
3371 } | |
3372 } | |
3373 | |
3374 | |
3375 // exit() | |
3376 // ~~~~~~ | |
3377 // Note that the collector can't reclaim the objectMonitor or deflate | |
3378 // the object out from underneath the thread calling ::exit() as the | |
3379 // thread calling ::exit() never transitions to a stable state. | |
3380 // This inhibits GC, which in turn inhibits asynchronous (and | |
3381 // inopportune) reclamation of "this". | |
3382 // | |
3383 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; | |
3384 // There's one exception to the claim above, however. EnterI() can call | |
3385 // exit() to drop a lock if the acquirer has been externally suspended. | |
3386 // In that case exit() is called with _thread_state as _thread_blocked, | |
3387 // but the monitor's _count field is > 0, which inhibits reclamation. | |
3388 // | |
3389 // 1-0 exit | |
3390 // ~~~~~~~~ | |
3391 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of | |
3392 // the fast-path operators have been optimized so the common ::exit() | |
3393 // operation is 1-0. See i486.ad fast_unlock(), for instance. | |
3394 // The code emitted by fast_unlock() elides the usual MEMBAR. This | |
3395 // greatly improves latency -- MEMBAR and CAS having considerable local | |
3396 // latency on modern processors -- but at the cost of "stranding". Absent the | |
3397 // MEMBAR, a thread in fast_unlock() can race a thread in the slow | |
3398 // ::enter() path, resulting in the entering thread being stranding | |
3399 // and a progress-liveness failure. Stranding is extremely rare. | |
3400 // We use timers (timed park operations) & periodic polling to detect | |
3401 // and recover from stranding. Potentially stranded threads periodically | |
3402 // wake up and poll the lock. See the usage of the _Responsible variable. | |
3403 // | |
3404 // The CAS() in enter provides for safety and exclusion, while the CAS or | |
3405 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking | |
3406 // eliminates the CAS/MEMBAR from the exist path, but it admits stranding. | |
3407 // We detect and recover from stranding with timers. | |
3408 // | |
3409 // If a thread transiently strands it'll park until (a) another | |
3410 // thread acquires the lock and then drops the lock, at which time the | |
3411 // exiting thread will notice and unpark the stranded thread, or, (b) | |
3412 // the timer expires. If the lock is high traffic then the stranding latency | |
3413 // will be low due to (a). If the lock is low traffic then the odds of | |
3414 // stranding are lower, although the worst-case stranding latency | |
3415 // is longer. Critically, we don't want to put excessive load in the | |
3416 // platform's timer subsystem. We want to minimize both the timer injection | |
3417 // rate (timers created/sec) as well as the number of timers active at | |
3418 // any one time. (more precisely, we want to minimize timer-seconds, which is | |
3419 // the integral of the # of active timers at any instant over time). | |
3420 // Both impinge on OS scalability. Given that, at most one thread parked on | |
3421 // a monitor will use a timer. | |
3422 | |
3423 void ATTR ObjectMonitor::exit(TRAPS) { | |
3424 Thread * Self = THREAD ; | |
3425 if (THREAD != _owner) { | |
3426 if (THREAD->is_lock_owned((address) _owner)) { | |
3427 // Transmute _owner from a BasicLock pointer to a Thread address. | |
3428 // We don't need to hold _mutex for this transition. | |
3429 // Non-null to Non-null is safe as long as all readers can | |
3430 // tolerate either flavor. | |
3431 assert (_recursions == 0, "invariant") ; | |
3432 _owner = THREAD ; | |
3433 _recursions = 0 ; | |
3434 OwnerIsThread = 1 ; | |
3435 } else { | |
3436 // NOTE: we need to handle unbalanced monitor enter/exit | |
3437 // in native code by throwing an exception. | |
3438 // TODO: Throw an IllegalMonitorStateException ? | |
3439 TEVENT (Exit - Throw IMSX) ; | |
3440 assert(false, "Non-balanced monitor enter/exit!"); | |
3441 if (false) { | |
3442 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); | |
3443 } | |
3444 return; | |
3445 } | |
3446 } | |
3447 | |
3448 if (_recursions != 0) { | |
3449 _recursions--; // this is simple recursive enter | |
3450 TEVENT (Inflated exit - recursive) ; | |
3451 return ; | |
3452 } | |
3453 | |
3454 // Invariant: after setting Responsible=null an thread must execute | |
3455 // a MEMBAR or other serializing instruction before fetching EntryList|cxq. | |
3456 if ((SyncFlags & 4) == 0) { | |
3457 _Responsible = NULL ; | |
3458 } | |
3459 | |
3460 for (;;) { | |
3461 assert (THREAD == _owner, "invariant") ; | |
3462 | |
3463 // Fast-path monitor exit: | |
3464 // | |
3465 // Observe the Dekker/Lamport duality: | |
3466 // A thread in ::exit() executes: | |
3467 // ST Owner=null; MEMBAR; LD EntryList|cxq. | |
3468 // A thread in the contended ::enter() path executes the complementary: | |
3469 // ST EntryList|cxq = nonnull; MEMBAR; LD Owner. | |
3470 // | |
3471 // Note that there's a benign race in the exit path. We can drop the | |
3472 // lock, another thread can reacquire the lock immediately, and we can | |
3473 // then wake a thread unnecessarily (yet another flavor of futile wakeup). | |
3474 // This is benign, and we've structured the code so the windows are short | |
3475 // and the frequency of such futile wakeups is low. | |
3476 // | |
3477 // We could eliminate the race by encoding both the "LOCKED" state and | |
3478 // the queue head in a single word. Exit would then use either CAS to | |
3479 // clear the LOCKED bit/byte. This precludes the desirable 1-0 optimization, | |
3480 // however. | |
3481 // | |
3482 // Possible fast-path ::exit() optimization: | |
3483 // The current fast-path exit implementation fetches both cxq and EntryList. | |
3484 // See also i486.ad fast_unlock(). Testing has shown that two LDs | |
3485 // isn't measurably slower than a single LD on any platforms. | |
3486 // Still, we could reduce the 2 LDs to one or zero by one of the following: | |
3487 // | |
3488 // - Use _count instead of cxq|EntryList | |
3489 // We intend to eliminate _count, however, when we switch | |
3490 // to on-the-fly deflation in ::exit() as is used in | |
3491 // Metalocks and RelaxedLocks. | |
3492 // | |
3493 // - Establish the invariant that cxq == null implies EntryList == null. | |
3494 // set cxq == EMPTY (1) to encode the state where cxq is empty | |
3495 // by EntryList != null. EMPTY is a distinguished value. | |
3496 // The fast-path exit() would fetch cxq but not EntryList. | |
3497 // | |
3498 // - Encode succ as follows: | |
3499 // succ = t : Thread t is the successor -- t is ready or is spinning. | |
3500 // Exiting thread does not need to wake a successor. | |
3501 // succ = 0 : No successor required -> (EntryList|cxq) == null | |
3502 // Exiting thread does not need to wake a successor | |
3503 // succ = 1 : Successor required -> (EntryList|cxq) != null and | |
3504 // logically succ == null. | |
3505 // Exiting thread must wake a successor. | |
3506 // | |
3507 // The 1-1 fast-exit path would appear as : | |
3508 // _owner = null ; membar ; | |
3509 // if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath | |
3510 // goto FastPathDone ; | |
3511 // | |
3512 // and the 1-0 fast-exit path would appear as: | |
3513 // if (_succ == 1) goto SlowPath | |
3514 // Owner = null ; | |
3515 // goto FastPathDone | |
3516 // | |
3517 // - Encode the LSB of _owner as 1 to indicate that exit() | |
3518 // must use the slow-path and make a successor ready. | |
3519 // (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null | |
3520 // (_owner & 1) == 0 IFF succ == null && (EntryList|cxq) != null (obviously) | |
3521 // The 1-0 fast exit path would read: | |
3522 // if (_owner != Self) goto SlowPath | |
3523 // _owner = null | |
3524 // goto FastPathDone | |
3525 | |
3526 if (Knob_ExitPolicy == 0) { | |
3527 // release semantics: prior loads and stores from within the critical section | |
3528 // must not float (reorder) past the following store that drops the lock. | |
3529 // On SPARC that requires MEMBAR #loadstore|#storestore. | |
3530 // But of course in TSO #loadstore|#storestore is not required. | |
3531 // I'd like to write one of the following: | |
3532 // A. OrderAccess::release() ; _owner = NULL | |
3533 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL; | |
3534 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both | |
3535 // store into a _dummy variable. That store is not needed, but can result | |
3536 // in massive wasteful coherency traffic on classic SMP systems. | |
3537 // Instead, I use release_store(), which is implemented as just a simple | |
3538 // ST on x64, x86 and SPARC. | |
3539 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock | |
3540 OrderAccess::storeload() ; // See if we need to wake a successor | |
3541 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { | |
3542 TEVENT (Inflated exit - simple egress) ; | |
3543 return ; | |
3544 } | |
3545 TEVENT (Inflated exit - complex egress) ; | |
3546 | |
3547 // Normally the exiting thread is responsible for ensuring succession, | |
3548 // but if other successors are ready or other entering threads are spinning | |
3549 // then this thread can simply store NULL into _owner and exit without | |
3550 // waking a successor. The existence of spinners or ready successors | |
3551 // guarantees proper succession (liveness). Responsibility passes to the | |
3552 // ready or running successors. The exiting thread delegates the duty. | |
3553 // More precisely, if a successor already exists this thread is absolved | |
3554 // of the responsibility of waking (unparking) one. | |
3555 // | |
3556 // The _succ variable is critical to reducing futile wakeup frequency. | |
3557 // _succ identifies the "heir presumptive" thread that has been made | |
3558 // ready (unparked) but that has not yet run. We need only one such | |
3559 // successor thread to guarantee progress. | |
3560 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf | |
3561 // section 3.3 "Futile Wakeup Throttling" for details. | |
3562 // | |
3563 // Note that spinners in Enter() also set _succ non-null. | |
3564 // In the current implementation spinners opportunistically set | |
3565 // _succ so that exiting threads might avoid waking a successor. | |
3566 // Another less appealing alternative would be for the exiting thread | |
3567 // to drop the lock and then spin briefly to see if a spinner managed | |
3568 // to acquire the lock. If so, the exiting thread could exit | |
3569 // immediately without waking a successor, otherwise the exiting | |
3570 // thread would need to dequeue and wake a successor. | |
3571 // (Note that we'd need to make the post-drop spin short, but no | |
3572 // shorter than the worst-case round-trip cache-line migration time. | |
3573 // The dropped lock needs to become visible to the spinner, and then | |
3574 // the acquisition of the lock by the spinner must become visible to | |
3575 // the exiting thread). | |
3576 // | |
3577 | |
3578 // It appears that an heir-presumptive (successor) must be made ready. | |
3579 // Only the current lock owner can manipulate the EntryList or | |
3580 // drain _cxq, so we need to reacquire the lock. If we fail | |
3581 // to reacquire the lock the responsibility for ensuring succession | |
3582 // falls to the new owner. | |
3583 // | |
3584 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { | |
3585 return ; | |
3586 } | |
3587 TEVENT (Exit - Reacquired) ; | |
3588 } else { | |
3589 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { | |
3590 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock | |
3591 OrderAccess::storeload() ; | |
3592 // Ratify the previously observed values. | |
3593 if (_cxq == NULL || _succ != NULL) { | |
3594 TEVENT (Inflated exit - simple egress) ; | |
3595 return ; | |
3596 } | |
3597 | |
3598 // inopportune interleaving -- the exiting thread (this thread) | |
3599 // in the fast-exit path raced an entering thread in the slow-enter | |
3600 // path. | |
3601 // We have two choices: | |
3602 // A. Try to reacquire the lock. | |
3603 // If the CAS() fails return immediately, otherwise | |
3604 // we either restart/rerun the exit operation, or simply | |
3605 // fall-through into the code below which wakes a successor. | |
3606 // B. If the elements forming the EntryList|cxq are TSM | |
3607 // we could simply unpark() the lead thread and return | |
3608 // without having set _succ. | |
3609 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { | |
3610 TEVENT (Inflated exit - reacquired succeeded) ; | |
3611 return ; | |
3612 } | |
3613 TEVENT (Inflated exit - reacquired failed) ; | |
3614 } else { | |
3615 TEVENT (Inflated exit - complex egress) ; | |
3616 } | |
3617 } | |
3618 | |
3619 guarantee (_owner == THREAD, "invariant") ; | |
3620 | |
3621 // Select an appropriate successor ("heir presumptive") from the EntryList | |
3622 // and make it ready. Generally we just wake the head of EntryList . | |
3623 // There's no algorithmic constraint that we use the head - it's just | |
3624 // a policy decision. Note that the thread at head of the EntryList | |
3625 // remains at the head until it acquires the lock. This means we'll | |
3626 // repeatedly wake the same thread until it manages to grab the lock. | |
3627 // This is generally a good policy - if we're seeing lots of futile wakeups | |
3628 // at least we're waking/rewaking a thread that's like to be hot or warm | |
3629 // (have residual D$ and TLB affinity). | |
3630 // | |
3631 // "Wakeup locality" optimization: | |
3632 // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt | |
3633 // In the future we'll try to bias the selection mechanism | |
3634 // to preferentially pick a thread that recently ran on | |
3635 // a processor element that shares cache with the CPU on which | |
3636 // the exiting thread is running. We need access to Solaris' | |
3637 // schedctl.sc_cpu to make that work. | |
3638 // | |
3639 ObjectWaiter * w = NULL ; | |
3640 int QMode = Knob_QMode ; | |
3641 | |
3642 if (QMode == 2 && _cxq != NULL) { | |
3643 // QMode == 2 : cxq has precedence over EntryList. | |
3644 // Try to directly wake a successor from the cxq. | |
3645 // If successful, the successor will need to unlink itself from cxq. | |
3646 w = _cxq ; | |
3647 assert (w != NULL, "invariant") ; | |
3648 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ; | |
3649 ExitEpilog (Self, w) ; | |
3650 return ; | |
3651 } | |
3652 | |
3653 if (QMode == 3 && _cxq != NULL) { | |
3654 // Aggressively drain cxq into EntryList at the first opportunity. | |
3655 // This policy ensure that recently-run threads live at the head of EntryList. | |
3656 // Drain _cxq into EntryList - bulk transfer. | |
3657 // First, detach _cxq. | |
3658 // The following loop is tantamount to: w = swap (&cxq, NULL) | |
3659 w = _cxq ; | |
3660 for (;;) { | |
3661 assert (w != NULL, "Invariant") ; | |
3662 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; | |
3663 if (u == w) break ; | |
3664 w = u ; | |
3665 } | |
3666 assert (w != NULL , "invariant") ; | |
3667 | |
3668 ObjectWaiter * q = NULL ; | |
3669 ObjectWaiter * p ; | |
3670 for (p = w ; p != NULL ; p = p->_next) { | |
3671 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; | |
3672 p->TState = ObjectWaiter::TS_ENTER ; | |
3673 p->_prev = q ; | |
3674 q = p ; | |
3675 } | |
3676 | |
3677 // Append the RATs to the EntryList | |
3678 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time. | |
3679 ObjectWaiter * Tail ; | |
3680 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ; | |
3681 if (Tail == NULL) { | |
3682 _EntryList = w ; | |
3683 } else { | |
3684 Tail->_next = w ; | |
3685 w->_prev = Tail ; | |
3686 } | |
3687 | |
3688 // Fall thru into code that tries to wake a successor from EntryList | |
3689 } | |
3690 | |
3691 if (QMode == 4 && _cxq != NULL) { | |
3692 // Aggressively drain cxq into EntryList at the first opportunity. | |
3693 // This policy ensure that recently-run threads live at the head of EntryList. | |
3694 | |
3695 // Drain _cxq into EntryList - bulk transfer. | |
3696 // First, detach _cxq. | |
3697 // The following loop is tantamount to: w = swap (&cxq, NULL) | |
3698 w = _cxq ; | |
3699 for (;;) { | |
3700 assert (w != NULL, "Invariant") ; | |
3701 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; | |
3702 if (u == w) break ; | |
3703 w = u ; | |
3704 } | |
3705 assert (w != NULL , "invariant") ; | |
3706 | |
3707 ObjectWaiter * q = NULL ; | |
3708 ObjectWaiter * p ; | |
3709 for (p = w ; p != NULL ; p = p->_next) { | |
3710 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; | |
3711 p->TState = ObjectWaiter::TS_ENTER ; | |
3712 p->_prev = q ; | |
3713 q = p ; | |
3714 } | |
3715 | |
3716 // Prepend the RATs to the EntryList | |
3717 if (_EntryList != NULL) { | |
3718 q->_next = _EntryList ; | |
3719 _EntryList->_prev = q ; | |
3720 } | |
3721 _EntryList = w ; | |
3722 | |
3723 // Fall thru into code that tries to wake a successor from EntryList | |
3724 } | |
3725 | |
3726 w = _EntryList ; | |
3727 if (w != NULL) { | |
3728 // I'd like to write: guarantee (w->_thread != Self). | |
3729 // But in practice an exiting thread may find itself on the EntryList. | |
3730 // Lets say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and | |
3731 // then calls exit(). Exit release the lock by setting O._owner to NULL. | |
3732 // Lets say T1 then stalls. T2 acquires O and calls O.notify(). The | |
3733 // notify() operation moves T1 from O's waitset to O's EntryList. T2 then | |
3734 // release the lock "O". T2 resumes immediately after the ST of null into | |
3735 // _owner, above. T2 notices that the EntryList is populated, so it | |
3736 // reacquires the lock and then finds itself on the EntryList. | |
3737 // Given all that, we have to tolerate the circumstance where "w" is | |
3738 // associated with Self. | |
3739 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
3740 ExitEpilog (Self, w) ; | |
3741 return ; | |
3742 } | |
3743 | |
3744 // If we find that both _cxq and EntryList are null then just | |
3745 // re-run the exit protocol from the top. | |
3746 w = _cxq ; | |
3747 if (w == NULL) continue ; | |
3748 | |
3749 // Drain _cxq into EntryList - bulk transfer. | |
3750 // First, detach _cxq. | |
3751 // The following loop is tantamount to: w = swap (&cxq, NULL) | |
3752 for (;;) { | |
3753 assert (w != NULL, "Invariant") ; | |
3754 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; | |
3755 if (u == w) break ; | |
3756 w = u ; | |
3757 } | |
3758 TEVENT (Inflated exit - drain cxq into EntryList) ; | |
3759 | |
3760 assert (w != NULL , "invariant") ; | |
3761 assert (_EntryList == NULL , "invariant") ; | |
3762 | |
3763 // Convert the LIFO SLL anchored by _cxq into a DLL. | |
3764 // The list reorganization step operates in O(LENGTH(w)) time. | |
3765 // It's critical that this step operate quickly as | |
3766 // "Self" still holds the outer-lock, restricting parallelism | |
3767 // and effectively lengthening the critical section. | |
3768 // Invariant: s chases t chases u. | |
3769 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so | |
3770 // we have faster access to the tail. | |
3771 | |
3772 if (QMode == 1) { | |
3773 // QMode == 1 : drain cxq to EntryList, reversing order | |
3774 // We also reverse the order of the list. | |
3775 ObjectWaiter * s = NULL ; | |
3776 ObjectWaiter * t = w ; | |
3777 ObjectWaiter * u = NULL ; | |
3778 while (t != NULL) { | |
3779 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ; | |
3780 t->TState = ObjectWaiter::TS_ENTER ; | |
3781 u = t->_next ; | |
3782 t->_prev = u ; | |
3783 t->_next = s ; | |
3784 s = t; | |
3785 t = u ; | |
3786 } | |
3787 _EntryList = s ; | |
3788 assert (s != NULL, "invariant") ; | |
3789 } else { | |
3790 // QMode == 0 or QMode == 2 | |
3791 _EntryList = w ; | |
3792 ObjectWaiter * q = NULL ; | |
3793 ObjectWaiter * p ; | |
3794 for (p = w ; p != NULL ; p = p->_next) { | |
3795 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; | |
3796 p->TState = ObjectWaiter::TS_ENTER ; | |
3797 p->_prev = q ; | |
3798 q = p ; | |
3799 } | |
3800 } | |
3801 | |
3802 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL | |
3803 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog(). | |
3804 | |
3805 // See if we can abdicate to a spinner instead of waking a thread. | |
3806 // A primary goal of the implementation is to reduce the | |
3807 // context-switch rate. | |
3808 if (_succ != NULL) continue; | |
3809 | |
3810 w = _EntryList ; | |
3811 if (w != NULL) { | |
3812 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
3813 ExitEpilog (Self, w) ; | |
3814 return ; | |
3815 } | |
3816 } | |
3817 } | |
3818 // complete_exit exits a lock returning recursion count | |
3819 // complete_exit/reenter operate as a wait without waiting | |
3820 // complete_exit requires an inflated monitor | |
3821 // The _owner field is not always the Thread addr even with an | |
3822 // inflated monitor, e.g. the monitor can be inflated by a non-owning | |
3823 // thread due to contention. | |
3824 intptr_t ObjectMonitor::complete_exit(TRAPS) { | |
3825 Thread * const Self = THREAD; | |
3826 assert(Self->is_Java_thread(), "Must be Java thread!"); | |
3827 JavaThread *jt = (JavaThread *)THREAD; | |
3828 | |
3829 DeferredInitialize(); | |
3830 | |
3831 if (THREAD != _owner) { | |
3832 if (THREAD->is_lock_owned ((address)_owner)) { | |
3833 assert(_recursions == 0, "internal state error"); | |
3834 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ | |
3835 _recursions = 0 ; | |
3836 OwnerIsThread = 1 ; | |
3837 } | |
3838 } | |
3839 | |
3840 guarantee(Self == _owner, "complete_exit not owner"); | |
3841 intptr_t save = _recursions; // record the old recursion count | |
3842 _recursions = 0; // set the recursion level to be 0 | |
3843 exit (Self) ; // exit the monitor | |
3844 guarantee (_owner != Self, "invariant"); | |
3845 return save; | |
3846 } | |
3847 | |
3848 // reenter() enters a lock and sets recursion count | |
3849 // complete_exit/reenter operate as a wait without waiting | |
3850 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) { | |
3851 Thread * const Self = THREAD; | |
3852 assert(Self->is_Java_thread(), "Must be Java thread!"); | |
3853 JavaThread *jt = (JavaThread *)THREAD; | |
3854 | |
3855 guarantee(_owner != Self, "reenter already owner"); | |
3856 enter (THREAD); // enter the monitor | |
3857 guarantee (_recursions == 0, "reenter recursion"); | |
3858 _recursions = recursions; | |
3859 return; | |
3860 } | |
3861 | |
3862 // Note: a subset of changes to ObjectMonitor::wait() | |
3863 // will need to be replicated in complete_exit above | |
3864 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { | |
3865 Thread * const Self = THREAD ; | |
3866 assert(Self->is_Java_thread(), "Must be Java thread!"); | |
3867 JavaThread *jt = (JavaThread *)THREAD; | |
3868 | |
3869 DeferredInitialize () ; | |
3870 | |
3871 // Throw IMSX or IEX. | |
3872 CHECK_OWNER(); | |
3873 | |
3874 // check for a pending interrupt | |
3875 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { | |
3876 // post monitor waited event. Note that this is past-tense, we are done waiting. | |
3877 if (JvmtiExport::should_post_monitor_waited()) { | |
3878 // Note: 'false' parameter is passed here because the | |
3879 // wait was not timed out due to thread interrupt. | |
3880 JvmtiExport::post_monitor_waited(jt, this, false); | |
3881 } | |
3882 TEVENT (Wait - Throw IEX) ; | |
3883 THROW(vmSymbols::java_lang_InterruptedException()); | |
3884 return ; | |
3885 } | |
3886 TEVENT (Wait) ; | |
3887 | |
3888 assert (Self->_Stalled == 0, "invariant") ; | |
3889 Self->_Stalled = intptr_t(this) ; | |
3890 jt->set_current_waiting_monitor(this); | |
3891 | |
3892 // create a node to be put into the queue | |
3893 // Critically, after we reset() the event but prior to park(), we must check | |
3894 // for a pending interrupt. | |
3895 ObjectWaiter node(Self); | |
3896 node.TState = ObjectWaiter::TS_WAIT ; | |
3897 Self->_ParkEvent->reset() ; | |
3898 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag | |
3899 | |
3900 // Enter the waiting queue, which is a circular doubly linked list in this case | |
3901 // but it could be a priority queue or any data structure. | |
3902 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only | |
3903 // by the the owner of the monitor *except* in the case where park() | |
3904 // returns because of a timeout of interrupt. Contention is exceptionally rare | |
3905 // so we use a simple spin-lock instead of a heavier-weight blocking lock. | |
3906 | |
3907 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ; | |
3908 AddWaiter (&node) ; | |
3909 Thread::SpinRelease (&_WaitSetLock) ; | |
3910 | |
3911 if ((SyncFlags & 4) == 0) { | |
3912 _Responsible = NULL ; | |
3913 } | |
3914 intptr_t save = _recursions; // record the old recursion count | |
3915 _waiters++; // increment the number of waiters | |
3916 _recursions = 0; // set the recursion level to be 1 | |
3917 exit (Self) ; // exit the monitor | |
3918 guarantee (_owner != Self, "invariant") ; | |
3919 | |
3920 // As soon as the ObjectMonitor's ownership is dropped in the exit() | |
3921 // call above, another thread can enter() the ObjectMonitor, do the | |
3922 // notify(), and exit() the ObjectMonitor. If the other thread's | |
3923 // exit() call chooses this thread as the successor and the unpark() | |
3924 // call happens to occur while this thread is posting a | |
3925 // MONITOR_CONTENDED_EXIT event, then we run the risk of the event | |
3926 // handler using RawMonitors and consuming the unpark(). | |
3927 // | |
3928 // To avoid the problem, we re-post the event. This does no harm | |
3929 // even if the original unpark() was not consumed because we are the | |
3930 // chosen successor for this monitor. | |
3931 if (node._notified != 0 && _succ == Self) { | |
3932 node._event->unpark(); | |
3933 } | |
3934 | |
3935 // The thread is on the WaitSet list - now park() it. | |
3936 // On MP systems it's conceivable that a brief spin before we park | |
3937 // could be profitable. | |
3938 // | |
3939 // TODO-FIXME: change the following logic to a loop of the form | |
3940 // while (!timeout && !interrupted && _notified == 0) park() | |
3941 | |
3942 int ret = OS_OK ; | |
3943 int WasNotified = 0 ; | |
3944 { // State transition wrappers | |
3945 OSThread* osthread = Self->osthread(); | |
3946 OSThreadWaitState osts(osthread, true); | |
3947 { | |
3948 ThreadBlockInVM tbivm(jt); | |
3949 // Thread is in thread_blocked state and oop access is unsafe. | |
3950 jt->set_suspend_equivalent(); | |
3951 | |
3952 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) { | |
3953 // Intentionally empty | |
3954 } else | |
3955 if (node._notified == 0) { | |
3956 if (millis <= 0) { | |
3957 Self->_ParkEvent->park () ; | |
3958 } else { | |
3959 ret = Self->_ParkEvent->park (millis) ; | |
3960 } | |
3961 } | |
3962 | |
3963 // were we externally suspended while we were waiting? | |
3964 if (ExitSuspendEquivalent (jt)) { | |
3965 // TODO-FIXME: add -- if succ == Self then succ = null. | |
3966 jt->java_suspend_self(); | |
3967 } | |
3968 | |
3969 } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm | |
3970 | |
3971 | |
3972 // Node may be on the WaitSet, the EntryList (or cxq), or in transition | |
3973 // from the WaitSet to the EntryList. | |
3974 // See if we need to remove Node from the WaitSet. | |
3975 // We use double-checked locking to avoid grabbing _WaitSetLock | |
3976 // if the thread is not on the wait queue. | |
3977 // | |
3978 // Note that we don't need a fence before the fetch of TState. | |
3979 // In the worst case we'll fetch a old-stale value of TS_WAIT previously | |
3980 // written by the is thread. (perhaps the fetch might even be satisfied | |
3981 // by a look-aside into the processor's own store buffer, although given | |
3982 // the length of the code path between the prior ST and this load that's | |
3983 // highly unlikely). If the following LD fetches a stale TS_WAIT value | |
3984 // then we'll acquire the lock and then re-fetch a fresh TState value. | |
3985 // That is, we fail toward safety. | |
3986 | |
3987 if (node.TState == ObjectWaiter::TS_WAIT) { | |
3988 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; | |
3989 if (node.TState == ObjectWaiter::TS_WAIT) { | |
3990 DequeueSpecificWaiter (&node) ; // unlink from WaitSet | |
3991 assert(node._notified == 0, "invariant"); | |
3992 node.TState = ObjectWaiter::TS_RUN ; | |
3993 } | |
3994 Thread::SpinRelease (&_WaitSetLock) ; | |
3995 } | |
3996 | |
3997 // The thread is now either on off-list (TS_RUN), | |
3998 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ). | |
3999 // The Node's TState variable is stable from the perspective of this thread. | |
4000 // No other threads will asynchronously modify TState. | |
4001 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ; | |
4002 OrderAccess::loadload() ; | |
4003 if (_succ == Self) _succ = NULL ; | |
4004 WasNotified = node._notified ; | |
4005 | |
4006 // Reentry phase -- reacquire the monitor. | |
4007 // re-enter contended monitor after object.wait(). | |
4008 // retain OBJECT_WAIT state until re-enter successfully completes | |
4009 // Thread state is thread_in_vm and oop access is again safe, | |
4010 // although the raw address of the object may have changed. | |
4011 // (Don't cache naked oops over safepoints, of course). | |
4012 | |
4013 // post monitor waited event. Note that this is past-tense, we are done waiting. | |
4014 if (JvmtiExport::should_post_monitor_waited()) { | |
4015 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); | |
4016 } | |
4017 OrderAccess::fence() ; | |
4018 | |
4019 assert (Self->_Stalled != 0, "invariant") ; | |
4020 Self->_Stalled = 0 ; | |
4021 | |
4022 assert (_owner != Self, "invariant") ; | |
4023 ObjectWaiter::TStates v = node.TState ; | |
4024 if (v == ObjectWaiter::TS_RUN) { | |
4025 enter (Self) ; | |
4026 } else { | |
4027 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; | |
4028 ReenterI (Self, &node) ; | |
4029 node.wait_reenter_end(this); | |
4030 } | |
4031 | |
4032 // Self has reacquired the lock. | |
4033 // Lifecycle - the node representing Self must not appear on any queues. | |
4034 // Node is about to go out-of-scope, but even if it were immortal we wouldn't | |
4035 // want residual elements associated with this thread left on any lists. | |
4036 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ; | |
4037 assert (_owner == Self, "invariant") ; | |
4038 assert (_succ != Self , "invariant") ; | |
4039 } // OSThreadWaitState() | |
4040 | |
4041 jt->set_current_waiting_monitor(NULL); | |
4042 | |
4043 guarantee (_recursions == 0, "invariant") ; | |
4044 _recursions = save; // restore the old recursion count | |
4045 _waiters--; // decrement the number of waiters | |
4046 | |
4047 // Verify a few postconditions | |
4048 assert (_owner == Self , "invariant") ; | |
4049 assert (_succ != Self , "invariant") ; | |
4050 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; | |
4051 | |
4052 if (SyncFlags & 32) { | |
4053 OrderAccess::fence() ; | |
4054 } | |
4055 | |
4056 // check if the notification happened | |
4057 if (!WasNotified) { | |
4058 // no, it could be timeout or Thread.interrupt() or both | |
4059 // check for interrupt event, otherwise it is timeout | |
4060 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { | |
4061 TEVENT (Wait - throw IEX from epilog) ; | |
4062 THROW(vmSymbols::java_lang_InterruptedException()); | |
4063 } | |
4064 } | |
4065 | |
4066 // NOTE: Spurious wake up will be consider as timeout. | |
4067 // Monitor notify has precedence over thread interrupt. | |
4068 } | |
4069 | |
4070 | |
4071 // Consider: | |
4072 // If the lock is cool (cxq == null && succ == null) and we're on an MP system | |
4073 // then instead of transferring a thread from the WaitSet to the EntryList | |
4074 // we might just dequeue a thread from the WaitSet and directly unpark() it. | |
4075 | |
4076 void ObjectMonitor::notify(TRAPS) { | |
4077 CHECK_OWNER(); | |
4078 if (_WaitSet == NULL) { | |
4079 TEVENT (Empty-Notify) ; | |
4080 return ; | |
4081 } | |
4082 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD); | |
4083 | |
4084 int Policy = Knob_MoveNotifyee ; | |
4085 | |
4086 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ; | |
4087 ObjectWaiter * iterator = DequeueWaiter() ; | |
4088 if (iterator != NULL) { | |
4089 TEVENT (Notify1 - Transfer) ; | |
4090 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; | |
4091 guarantee (iterator->_notified == 0, "invariant") ; | |
4092 // Disposition - what might we do with iterator ? | |
4093 // a. add it directly to the EntryList - either tail or head. | |
4094 // b. push it onto the front of the _cxq. | |
4095 // For now we use (a). | |
4096 if (Policy != 4) { | |
4097 iterator->TState = ObjectWaiter::TS_ENTER ; | |
4098 } | |
4099 iterator->_notified = 1 ; | |
4100 | |
4101 ObjectWaiter * List = _EntryList ; | |
4102 if (List != NULL) { | |
4103 assert (List->_prev == NULL, "invariant") ; | |
4104 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
4105 assert (List != iterator, "invariant") ; | |
4106 } | |
4107 | |
4108 if (Policy == 0) { // prepend to EntryList | |
4109 if (List == NULL) { | |
4110 iterator->_next = iterator->_prev = NULL ; | |
4111 _EntryList = iterator ; | |
4112 } else { | |
4113 List->_prev = iterator ; | |
4114 iterator->_next = List ; | |
4115 iterator->_prev = NULL ; | |
4116 _EntryList = iterator ; | |
4117 } | |
4118 } else | |
4119 if (Policy == 1) { // append to EntryList | |
4120 if (List == NULL) { | |
4121 iterator->_next = iterator->_prev = NULL ; | |
4122 _EntryList = iterator ; | |
4123 } else { | |
4124 // CONSIDER: finding the tail currently requires a linear-time walk of | |
4125 // the EntryList. We can make tail access constant-time by converting to | |
4126 // a CDLL instead of using our current DLL. | |
4127 ObjectWaiter * Tail ; | |
4128 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; | |
4129 assert (Tail != NULL && Tail->_next == NULL, "invariant") ; | |
4130 Tail->_next = iterator ; | |
4131 iterator->_prev = Tail ; | |
4132 iterator->_next = NULL ; | |
4133 } | |
4134 } else | |
4135 if (Policy == 2) { // prepend to cxq | |
4136 // prepend to cxq | |
4137 if (List == NULL) { | |
4138 iterator->_next = iterator->_prev = NULL ; | |
4139 _EntryList = iterator ; | |
4140 } else { | |
4141 iterator->TState = ObjectWaiter::TS_CXQ ; | |
4142 for (;;) { | |
4143 ObjectWaiter * Front = _cxq ; | |
4144 iterator->_next = Front ; | |
4145 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { | |
4146 break ; | |
4147 } | |
4148 } | |
4149 } | |
4150 } else | |
4151 if (Policy == 3) { // append to cxq | |
4152 iterator->TState = ObjectWaiter::TS_CXQ ; | |
4153 for (;;) { | |
4154 ObjectWaiter * Tail ; | |
4155 Tail = _cxq ; | |
4156 if (Tail == NULL) { | |
4157 iterator->_next = NULL ; | |
4158 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { | |
4159 break ; | |
4160 } | |
4161 } else { | |
4162 while (Tail->_next != NULL) Tail = Tail->_next ; | |
4163 Tail->_next = iterator ; | |
4164 iterator->_prev = Tail ; | |
4165 iterator->_next = NULL ; | |
4166 break ; | |
4167 } | |
4168 } | |
4169 } else { | |
4170 ParkEvent * ev = iterator->_event ; | |
4171 iterator->TState = ObjectWaiter::TS_RUN ; | |
4172 OrderAccess::fence() ; | |
4173 ev->unpark() ; | |
4174 } | |
4175 | |
4176 if (Policy < 4) { | |
4177 iterator->wait_reenter_begin(this); | |
4178 } | |
4179 | |
4180 // _WaitSetLock protects the wait queue, not the EntryList. We could | |
4181 // move the add-to-EntryList operation, above, outside the critical section | |
4182 // protected by _WaitSetLock. In practice that's not useful. With the | |
4183 // exception of wait() timeouts and interrupts the monitor owner | |
4184 // is the only thread that grabs _WaitSetLock. There's almost no contention | |
4185 // on _WaitSetLock so it's not profitable to reduce the length of the | |
4186 // critical section. | |
4187 } | |
4188 | |
4189 Thread::SpinRelease (&_WaitSetLock) ; | |
4190 | |
4191 if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) { | |
4192 ObjectSynchronizer::_sync_Notifications->inc() ; | |
4193 } | |
4194 } | |
4195 | |
4196 | |
4197 void ObjectMonitor::notifyAll(TRAPS) { | |
4198 CHECK_OWNER(); | |
4199 ObjectWaiter* iterator; | |
4200 if (_WaitSet == NULL) { | |
4201 TEVENT (Empty-NotifyAll) ; | |
4202 return ; | |
4203 } | |
4204 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD); | |
4205 | |
4206 int Policy = Knob_MoveNotifyee ; | |
4207 int Tally = 0 ; | |
4208 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ; | |
4209 | |
4210 for (;;) { | |
4211 iterator = DequeueWaiter () ; | |
4212 if (iterator == NULL) break ; | |
4213 TEVENT (NotifyAll - Transfer1) ; | |
4214 ++Tally ; | |
4215 | |
4216 // Disposition - what might we do with iterator ? | |
4217 // a. add it directly to the EntryList - either tail or head. | |
4218 // b. push it onto the front of the _cxq. | |
4219 // For now we use (a). | |
4220 // | |
4221 // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset | |
4222 // to the EntryList. This could be done more efficiently with a single bulk transfer, | |
4223 // but in practice it's not time-critical. Beware too, that in prepend-mode we invert the | |
4224 // order of the waiters. Lets say that the waitset is "ABCD" and the EntryList is "XYZ". | |
4225 // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will | |
4226 // be "DCBAXYZ". | |
4227 | |
4228 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; | |
4229 guarantee (iterator->_notified == 0, "invariant") ; | |
4230 iterator->_notified = 1 ; | |
4231 if (Policy != 4) { | |
4232 iterator->TState = ObjectWaiter::TS_ENTER ; | |
4233 } | |
4234 | |
4235 ObjectWaiter * List = _EntryList ; | |
4236 if (List != NULL) { | |
4237 assert (List->_prev == NULL, "invariant") ; | |
4238 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
4239 assert (List != iterator, "invariant") ; | |
4240 } | |
4241 | |
4242 if (Policy == 0) { // prepend to EntryList | |
4243 if (List == NULL) { | |
4244 iterator->_next = iterator->_prev = NULL ; | |
4245 _EntryList = iterator ; | |
4246 } else { | |
4247 List->_prev = iterator ; | |
4248 iterator->_next = List ; | |
4249 iterator->_prev = NULL ; | |
4250 _EntryList = iterator ; | |
4251 } | |
4252 } else | |
4253 if (Policy == 1) { // append to EntryList | |
4254 if (List == NULL) { | |
4255 iterator->_next = iterator->_prev = NULL ; | |
4256 _EntryList = iterator ; | |
4257 } else { | |
4258 // CONSIDER: finding the tail currently requires a linear-time walk of | |
4259 // the EntryList. We can make tail access constant-time by converting to | |
4260 // a CDLL instead of using our current DLL. | |
4261 ObjectWaiter * Tail ; | |
4262 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; | |
4263 assert (Tail != NULL && Tail->_next == NULL, "invariant") ; | |
4264 Tail->_next = iterator ; | |
4265 iterator->_prev = Tail ; | |
4266 iterator->_next = NULL ; | |
4267 } | |
4268 } else | |
4269 if (Policy == 2) { // prepend to cxq | |
4270 // prepend to cxq | |
4271 iterator->TState = ObjectWaiter::TS_CXQ ; | |
4272 for (;;) { | |
4273 ObjectWaiter * Front = _cxq ; | |
4274 iterator->_next = Front ; | |
4275 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { | |
4276 break ; | |
4277 } | |
4278 } | |
4279 } else | |
4280 if (Policy == 3) { // append to cxq | |
4281 iterator->TState = ObjectWaiter::TS_CXQ ; | |
4282 for (;;) { | |
4283 ObjectWaiter * Tail ; | |
4284 Tail = _cxq ; | |
4285 if (Tail == NULL) { | |
4286 iterator->_next = NULL ; | |
4287 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { | |
4288 break ; | |
4289 } | |
4290 } else { | |
4291 while (Tail->_next != NULL) Tail = Tail->_next ; | |
4292 Tail->_next = iterator ; | |
4293 iterator->_prev = Tail ; | |
4294 iterator->_next = NULL ; | |
4295 break ; | |
4296 } | |
4297 } | |
4298 } else { | |
4299 ParkEvent * ev = iterator->_event ; | |
4300 iterator->TState = ObjectWaiter::TS_RUN ; | |
4301 OrderAccess::fence() ; | |
4302 ev->unpark() ; | |
4303 } | |
4304 | |
4305 if (Policy < 4) { | |
4306 iterator->wait_reenter_begin(this); | |
4307 } | |
4308 | |
4309 // _WaitSetLock protects the wait queue, not the EntryList. We could | |
4310 // move the add-to-EntryList operation, above, outside the critical section | |
4311 // protected by _WaitSetLock. In practice that's not useful. With the | |
4312 // exception of wait() timeouts and interrupts the monitor owner | |
4313 // is the only thread that grabs _WaitSetLock. There's almost no contention | |
4314 // on _WaitSetLock so it's not profitable to reduce the length of the | |
4315 // critical section. | |
4316 } | |
4317 | |
4318 Thread::SpinRelease (&_WaitSetLock) ; | |
4319 | |
4320 if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) { | |
4321 ObjectSynchronizer::_sync_Notifications->inc(Tally) ; | |
4322 } | |
4323 } | |
4324 | |
4325 // check_slow() is a misnomer. It's called to simply to throw an IMSX exception. | |
4326 // TODO-FIXME: remove check_slow() -- it's likely dead. | |
4327 | |
4328 void ObjectMonitor::check_slow(TRAPS) { | |
4329 TEVENT (check_slow - throw IMSX) ; | |
4330 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner"); | |
4331 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner"); | |
4332 } | |
4333 | |
4334 | |
4335 // ------------------------------------------------------------------------- | |
4336 // The raw monitor subsystem is entirely distinct from normal | |
4337 // java-synchronization or jni-synchronization. raw monitors are not | |
4338 // associated with objects. They can be implemented in any manner | |
4339 // that makes sense. The original implementors decided to piggy-back | |
4340 // the raw-monitor implementation on the existing Java objectMonitor mechanism. | |
4341 // This flaw needs to fixed. We should reimplement raw monitors as sui-generis. | |
4342 // Specifically, we should not implement raw monitors via java monitors. | |
4343 // Time permitting, we should disentangle and deconvolve the two implementations | |
4344 // and move the resulting raw monitor implementation over to the JVMTI directories. | |
4345 // Ideally, the raw monitor implementation would be built on top of | |
4346 // park-unpark and nothing else. | |
4347 // | |
4348 // raw monitors are used mainly by JVMTI | |
4349 // The raw monitor implementation borrows the ObjectMonitor structure, | |
4350 // but the operators are degenerate and extremely simple. | |
4351 // | |
4352 // Mixed use of a single objectMonitor instance -- as both a raw monitor | |
4353 // and a normal java monitor -- is not permissible. | |
4354 // | |
4355 // Note that we use the single RawMonitor_lock to protect queue operations for | |
4356 // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage | |
4357 // is deprecated and rare, this is not of concern. The RawMonitor_lock can not | |
4358 // be held indefinitely. The critical sections must be short and bounded. | |
4359 // | |
4360 // ------------------------------------------------------------------------- | |
4361 | |
4362 int ObjectMonitor::SimpleEnter (Thread * Self) { | |
4363 for (;;) { | |
4364 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { | |
4365 return OS_OK ; | |
4366 } | |
4367 | |
4368 ObjectWaiter Node (Self) ; | |
4369 Self->_ParkEvent->reset() ; // strictly optional | |
4370 Node.TState = ObjectWaiter::TS_ENTER ; | |
4371 | |
4372 RawMonitor_lock->lock_without_safepoint_check() ; | |
4373 Node._next = _EntryList ; | |
4374 _EntryList = &Node ; | |
4375 OrderAccess::fence() ; | |
4376 if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { | |
4377 _EntryList = Node._next ; | |
4378 RawMonitor_lock->unlock() ; | |
4379 return OS_OK ; | |
4380 } | |
4381 RawMonitor_lock->unlock() ; | |
4382 while (Node.TState == ObjectWaiter::TS_ENTER) { | |
4383 Self->_ParkEvent->park() ; | |
4384 } | |
4385 } | |
4386 } | |
4387 | |
4388 int ObjectMonitor::SimpleExit (Thread * Self) { | |
4389 guarantee (_owner == Self, "invariant") ; | |
4390 OrderAccess::release_store_ptr (&_owner, NULL) ; | |
4391 OrderAccess::fence() ; | |
4392 if (_EntryList == NULL) return OS_OK ; | |
4393 ObjectWaiter * w ; | |
4394 | |
4395 RawMonitor_lock->lock_without_safepoint_check() ; | |
4396 w = _EntryList ; | |
4397 if (w != NULL) { | |
4398 _EntryList = w->_next ; | |
4399 } | |
4400 RawMonitor_lock->unlock() ; | |
4401 if (w != NULL) { | |
4402 guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ; | |
4403 ParkEvent * ev = w->_event ; | |
4404 w->TState = ObjectWaiter::TS_RUN ; | |
4405 OrderAccess::fence() ; | |
4406 ev->unpark() ; | |
4407 } | |
4408 return OS_OK ; | |
4409 } | |
4410 | |
4411 int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) { | |
4412 guarantee (_owner == Self , "invariant") ; | |
4413 guarantee (_recursions == 0, "invariant") ; | |
4414 | |
4415 ObjectWaiter Node (Self) ; | |
4416 Node._notified = 0 ; | |
4417 Node.TState = ObjectWaiter::TS_WAIT ; | |
4418 | |
4419 RawMonitor_lock->lock_without_safepoint_check() ; | |
4420 Node._next = _WaitSet ; | |
4421 _WaitSet = &Node ; | |
4422 RawMonitor_lock->unlock() ; | |
4423 | |
4424 SimpleExit (Self) ; | |
4425 guarantee (_owner != Self, "invariant") ; | |
4426 | |
4427 int ret = OS_OK ; | |
4428 if (millis <= 0) { | |
4429 Self->_ParkEvent->park(); | |
4430 } else { | |
4431 ret = Self->_ParkEvent->park(millis); | |
4432 } | |
4433 | |
4434 // If thread still resides on the waitset then unlink it. | |
4435 // Double-checked locking -- the usage is safe in this context | |
4436 // as we TState is volatile and the lock-unlock operators are | |
4437 // serializing (barrier-equivalent). | |
4438 | |
4439 if (Node.TState == ObjectWaiter::TS_WAIT) { | |
4440 RawMonitor_lock->lock_without_safepoint_check() ; | |
4441 if (Node.TState == ObjectWaiter::TS_WAIT) { | |
4442 // Simple O(n) unlink, but performance isn't critical here. | |
4443 ObjectWaiter * p ; | |
4444 ObjectWaiter * q = NULL ; | |
4445 for (p = _WaitSet ; p != &Node; p = p->_next) { | |
4446 q = p ; | |
4447 } | |
4448 guarantee (p == &Node, "invariant") ; | |
4449 if (q == NULL) { | |
4450 guarantee (p == _WaitSet, "invariant") ; | |
4451 _WaitSet = p->_next ; | |
4452 } else { | |
4453 guarantee (p == q->_next, "invariant") ; | |
4454 q->_next = p->_next ; | |
4455 } | |
4456 Node.TState = ObjectWaiter::TS_RUN ; | |
4457 } | |
4458 RawMonitor_lock->unlock() ; | |
4459 } | |
4460 | |
4461 guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ; | |
4462 SimpleEnter (Self) ; | |
4463 | |
4464 guarantee (_owner == Self, "invariant") ; | |
4465 guarantee (_recursions == 0, "invariant") ; | |
4466 return ret ; | |
4467 } | |
4468 | |
4469 int ObjectMonitor::SimpleNotify (Thread * Self, bool All) { | |
4470 guarantee (_owner == Self, "invariant") ; | |
4471 if (_WaitSet == NULL) return OS_OK ; | |
4472 | |
4473 // We have two options: | |
4474 // A. Transfer the threads from the WaitSet to the EntryList | |
4475 // B. Remove the thread from the WaitSet and unpark() it. | |
4476 // | |
4477 // We use (B), which is crude and results in lots of futile | |
4478 // context switching. In particular (B) induces lots of contention. | |
4479 | |
4480 ParkEvent * ev = NULL ; // consider using a small auto array ... | |
4481 RawMonitor_lock->lock_without_safepoint_check() ; | |
4482 for (;;) { | |
4483 ObjectWaiter * w = _WaitSet ; | |
4484 if (w == NULL) break ; | |
4485 _WaitSet = w->_next ; | |
4486 if (ev != NULL) { ev->unpark(); ev = NULL; } | |
4487 ev = w->_event ; | |
4488 OrderAccess::loadstore() ; | |
4489 w->TState = ObjectWaiter::TS_RUN ; | |
4490 OrderAccess::storeload(); | |
4491 if (!All) break ; | |
4492 } | |
4493 RawMonitor_lock->unlock() ; | |
4494 if (ev != NULL) ev->unpark(); | |
4495 return OS_OK ; | |
4496 } | |
4497 | |
4498 // Any JavaThread will enter here with state _thread_blocked | |
4499 int ObjectMonitor::raw_enter(TRAPS) { | |
4500 TEVENT (raw_enter) ; | |
4501 void * Contended ; | |
4502 | |
4503 // don't enter raw monitor if thread is being externally suspended, it will | |
4504 // surprise the suspender if a "suspended" thread can still enter monitor | |
4505 JavaThread * jt = (JavaThread *)THREAD; | |
4506 if (THREAD->is_Java_thread()) { | |
4507 jt->SR_lock()->lock_without_safepoint_check(); | |
4508 while (jt->is_external_suspend()) { | |
4509 jt->SR_lock()->unlock(); | |
4510 jt->java_suspend_self(); | |
4511 jt->SR_lock()->lock_without_safepoint_check(); | |
4512 } | |
4513 // guarded by SR_lock to avoid racing with new external suspend requests. | |
4514 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; | |
4515 jt->SR_lock()->unlock(); | |
4516 } else { | |
4517 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; | |
4518 } | |
4519 | |
4520 if (Contended == THREAD) { | |
4521 _recursions ++ ; | |
4522 return OM_OK ; | |
4523 } | |
4524 | |
4525 if (Contended == NULL) { | |
4526 guarantee (_owner == THREAD, "invariant") ; | |
4527 guarantee (_recursions == 0, "invariant") ; | |
4528 return OM_OK ; | |
4529 } | |
4530 | |
4531 THREAD->set_current_pending_monitor(this); | |
4532 | |
4533 if (!THREAD->is_Java_thread()) { | |
4534 // No other non-Java threads besides VM thread would acquire | |
4535 // a raw monitor. | |
4536 assert(THREAD->is_VM_thread(), "must be VM thread"); | |
4537 SimpleEnter (THREAD) ; | |
4538 } else { | |
4539 guarantee (jt->thread_state() == _thread_blocked, "invariant") ; | |
4540 for (;;) { | |
4541 jt->set_suspend_equivalent(); | |
4542 // cleared by handle_special_suspend_equivalent_condition() or | |
4543 // java_suspend_self() | |
4544 SimpleEnter (THREAD) ; | |
4545 | |
4546 // were we externally suspended while we were waiting? | |
4547 if (!jt->handle_special_suspend_equivalent_condition()) break ; | |
4548 | |
4549 // This thread was externally suspended | |
4550 // | |
4551 // This logic isn't needed for JVMTI raw monitors, | |
4552 // but doesn't hurt just in case the suspend rules change. This | |
4553 // logic is needed for the ObjectMonitor.wait() reentry phase. | |
4554 // We have reentered the contended monitor, but while we were | |
4555 // waiting another thread suspended us. We don't want to reenter | |
4556 // the monitor while suspended because that would surprise the | |
4557 // thread that suspended us. | |
4558 // | |
4559 // Drop the lock - | |
4560 SimpleExit (THREAD) ; | |
4561 | |
4562 jt->java_suspend_self(); | |
4563 } | |
4564 | |
4565 assert(_owner == THREAD, "Fatal error with monitor owner!"); | |
4566 assert(_recursions == 0, "Fatal error with monitor recursions!"); | |
4567 } | |
4568 | |
4569 THREAD->set_current_pending_monitor(NULL); | |
4570 guarantee (_recursions == 0, "invariant") ; | |
4571 return OM_OK; | |
4572 } | |
4573 | |
4574 // Used mainly for JVMTI raw monitor implementation | |
4575 // Also used for ObjectMonitor::wait(). | |
4576 int ObjectMonitor::raw_exit(TRAPS) { | |
4577 TEVENT (raw_exit) ; | |
4578 if (THREAD != _owner) { | |
4579 return OM_ILLEGAL_MONITOR_STATE; | |
4580 } | |
4581 if (_recursions > 0) { | |
4582 --_recursions ; | |
4583 return OM_OK ; | |
4584 } | |
4585 | |
4586 void * List = _EntryList ; | |
4587 SimpleExit (THREAD) ; | |
4588 | |
4589 return OM_OK; | |
4590 } | |
4591 | |
4592 // Used for JVMTI raw monitor implementation. | |
4593 // All JavaThreads will enter here with state _thread_blocked | |
4594 | |
4595 int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) { | |
4596 TEVENT (raw_wait) ; | |
4597 if (THREAD != _owner) { | |
4598 return OM_ILLEGAL_MONITOR_STATE; | |
4599 } | |
4600 | |
4601 // To avoid spurious wakeups we reset the parkevent -- This is strictly optional. | |
4602 // The caller must be able to tolerate spurious returns from raw_wait(). | |
4603 THREAD->_ParkEvent->reset() ; | |
4604 OrderAccess::fence() ; | |
4605 | |
4606 // check interrupt event | |
4607 if (interruptible && Thread::is_interrupted(THREAD, true)) { | |
4608 return OM_INTERRUPTED; | |
4609 } | |
4610 | |
4611 intptr_t save = _recursions ; | |
4612 _recursions = 0 ; | |
4613 _waiters ++ ; | |
4614 if (THREAD->is_Java_thread()) { | |
4615 guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ; | |
4616 ((JavaThread *)THREAD)->set_suspend_equivalent(); | |
4617 } | |
4618 int rv = SimpleWait (THREAD, millis) ; | |
4619 _recursions = save ; | |
4620 _waiters -- ; | |
4621 | |
4622 guarantee (THREAD == _owner, "invariant") ; | |
4623 if (THREAD->is_Java_thread()) { | |
4624 JavaThread * jSelf = (JavaThread *) THREAD ; | |
4625 for (;;) { | |
4626 if (!jSelf->handle_special_suspend_equivalent_condition()) break ; | |
4627 SimpleExit (THREAD) ; | |
4628 jSelf->java_suspend_self(); | |
4629 SimpleEnter (THREAD) ; | |
4630 jSelf->set_suspend_equivalent() ; | |
4631 } | |
4632 } | |
4633 guarantee (THREAD == _owner, "invariant") ; | |
4634 | |
4635 if (interruptible && Thread::is_interrupted(THREAD, true)) { | |
4636 return OM_INTERRUPTED; | |
4637 } | |
4638 return OM_OK ; | |
4639 } | |
4640 | |
4641 int ObjectMonitor::raw_notify(TRAPS) { | |
4642 TEVENT (raw_notify) ; | |
4643 if (THREAD != _owner) { | |
4644 return OM_ILLEGAL_MONITOR_STATE; | |
4645 } | |
4646 SimpleNotify (THREAD, false) ; | |
4647 return OM_OK; | |
4648 } | |
4649 | |
4650 int ObjectMonitor::raw_notifyAll(TRAPS) { | |
4651 TEVENT (raw_notifyAll) ; | |
4652 if (THREAD != _owner) { | |
4653 return OM_ILLEGAL_MONITOR_STATE; | |
4654 } | |
4655 SimpleNotify (THREAD, true) ; | |
4656 return OM_OK; | |
4657 } | |
4658 | |
4659 #ifndef PRODUCT | |
4660 void ObjectMonitor::verify() { | |
4661 } | |
4662 | |
4663 void ObjectMonitor::print() { | |
4664 } | |
4665 #endif | |
4666 | |
4667 //------------------------------------------------------------------------------ | |
4668 // Non-product code | |
4669 | |
4670 #ifndef PRODUCT | |
4671 | |
4672 void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled, | |
4673 bool is_method, bool is_locking) { | |
4674 // Don't know what to do here | |
4675 } | |
4676 | |
4677 // Verify all monitors in the monitor cache, the verification is weak. | |
4678 void ObjectSynchronizer::verify() { | |
4679 ObjectMonitor* block = gBlockList; | |
4680 ObjectMonitor* mid; | |
4681 while (block) { | |
4682 assert(block->object() == CHAINMARKER, "must be a block header"); | |
4683 for (int i = 1; i < _BLOCKSIZE; i++) { | |
4684 mid = block + i; | |
4685 oop object = (oop) mid->object(); | |
4686 if (object != NULL) { | |
4687 mid->verify(); | |
4688 } | |
4689 } | |
4690 block = (ObjectMonitor*) block->FreeNext; | |
4691 } | |
4692 } | |
4693 | |
4694 // Check if monitor belongs to the monitor cache | |
4695 // The list is grow-only so it's *relatively* safe to traverse | |
4696 // the list of extant blocks without taking a lock. | |
4697 | |
4698 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { | |
4699 ObjectMonitor* block = gBlockList; | |
4700 | |
4701 while (block) { | |
4702 assert(block->object() == CHAINMARKER, "must be a block header"); | |
4703 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { | |
4704 address mon = (address) monitor; | |
4705 address blk = (address) block; | |
4706 size_t diff = mon - blk; | |
4707 assert((diff % sizeof(ObjectMonitor)) == 0, "check"); | |
4708 return 1; | |
4709 } | |
4710 block = (ObjectMonitor*) block->FreeNext; | |
4711 } | |
4712 return 0; | |
4713 } | |
4714 | |
4715 #endif |