/*
 * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_synchronizer.cpp.incl"

#if defined(__GNUC__) && !defined(IA64)
  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif

// Native markword accessors for synchronization and hashCode().
//
// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// TODO: merge the objectMonitor and synchronizer classes.
//
// -----------------------------------------------------------------------------
#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)       {;}

#endif // ndef DTRACE_ENABLED

// ObjectWaiter serves as a "proxy" or surrogate thread.
// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
// ParkEvent instead.  Beware, however, that the JVMTI code
// knows about ObjectWaiters, so we'll have to reconcile that code.
// See next_waiter(), first_waiter(), etc.

class ObjectWaiter : public StackObj {
 public:
  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
  enum Sorted  { PREPEND, APPEND, SORTED } ;
  ObjectWaiter * volatile _next;
  ObjectWaiter * volatile _prev;
  Thread*       _thread;
  ParkEvent *   _event;
  volatile int  _notified ;
  volatile TStates TState ;
  Sorted        _Sorted ;         // List placement disposition
  bool          _active ;         // Contention monitoring is enabled
 public:
  ObjectWaiter(Thread* thread) {
    _next     = NULL;
    _prev     = NULL;
    _notified = 0;
    TState    = TS_RUN ;
    _thread   = thread;
    _event    = thread->_ParkEvent ;
    _active   = false;
    assert (_event != NULL, "invariant") ;
  }

  void wait_reenter_begin(ObjectMonitor *mon) {
    JavaThread *jt = (JavaThread *)this->_thread;
    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
  }

  void wait_reenter_end(ObjectMonitor *mon) {
    JavaThread *jt = (JavaThread *)this->_thread;
    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
  }
};

enum ManifestConstants {
    ClearResponsibleAtSTW   = 0,
    MaximumRecheckInterval  = 1000
} ;


#undef  TEVENT
#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }

#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}

#undef  TEVENT
#define TEVENT(nom) {;}

// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
// until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs.  Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.

static int MBFence (int x) { OrderAccess::fence(); return x; }

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;

static SharedGlobals GVars ;


// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

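// Illustrative only -- not part of the build.  A minimal sketch of how the
// __read_mostly idea above could be approximated with GCC; the section name
// below is an assumption borrowed from Linux kernel practice, not a HotSpot API:
#if 0
#define READ_MOSTLY __attribute__((section(".data.read_mostly")))
static int Knob_Example READ_MOSTLY = 0 ;   // knobs grouped away from hot RW data
#endif
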
static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
static int Knob_HandOff            = 0 ;
static int Knob_Verbose            = 0 ;
static int Knob_ReportSettings     = 0 ;

static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
static int Knob_SpinEarly          = 1 ;
static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
static int Knob_Bonus              = 100 ;     // spin success bonus
static int Knob_BonusB             = 100 ;     // spin success bonus
static int Knob_Penalty            = 200 ;     // spin failure penalty
static int Knob_Poverty            = 1000 ;
static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
static int Knob_FixedSpin          = 0 ;
static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
static int Knob_UsePause           = 1 ;
static int Knob_ExitPolicy         = 0 ;
static int Knob_PreSpin            = 10 ;      // 20-100 likely better
static int Knob_ResetEvent         = 0 ;
static int BackOffMask             = 0 ;

static int Knob_FastHSSEC          = 0 ;
static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
static volatile int InitDone       = 0 ;


// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 -- a prime near 2^32 * (Phi-1), Phi being the golden ratio
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values, such as multiplying by a large odd constant
//   or applying a shift-xor mix as in the variants implemented below.
//

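// Illustrative only -- the Phi-based scheme from the list above, written out
// as a hedged standalone sketch (phiHash is a hypothetical name, not HotSpot API):
#if 0
static intptr_t phiHash (oop obj) {
  // Multiply the aligned address bits by the golden-ratio prime and mix in stwRandom.
  return ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
}
#endif
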
static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two racing threads to generate the same random value.
     // On MP systems we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = intptr_t(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = intptr_t(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}

void BasicLock::print_on(outputStream* st) const {
  st->print("monitor");
}

void BasicLock::move_to(oop obj, BasicLock* dest) {
  // Check to see if we need to inflate the lock.  This is only needed
  // if an object is locked using "this" lightweight monitor.  In that
  // case, the displaced_header() is unlocked, because the
  // displaced_header() contains the header for the originally unlocked
  // object.  However, the object could have already been inflated.  That
  // does not matter: the inflation will just be a no-op.  For other cases,
  // the displaced header will be either 0x0 or 0x3, which are location
  // independent, therefore the BasicLock is free to move.
  //
  // During OSR we may need to relocate a BasicLock (which contains a
  // displaced word) from a location in an interpreter frame to a
  // new location in a compiled frame.  "this" refers to the source
  // basiclock in the interpreter frame.  "dest" refers to the destination
  // basiclock in the new compiled frame.  We *always* inflate in move_to().
  // The always-inflate policy works properly, but in 1.5.0 it can sometimes
  // cause performance problems in code that makes heavy use of a small # of
  // uncontended locks.  (We'd inflate during OSR, and then sync performance
  // would subsequently plummet because the thread would be forced thru the slow-path).
  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
  // operations in Fast_Lock and Fast_Unlock in i486.ad.
  //
  // Note that there is a way to safely swing the object's markword from
  // one stack location to another.  This avoids inflation.  Obviously,
  // we need to ensure that both locations refer to the current thread's stack.
  // There are some subtle concurrency issues, however, and since the benefit
  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
  // we'll leave that optimization for another time.

  if (displaced_header()->is_neutral()) {
    ObjectSynchronizer::inflate_helper(obj);
    // WARNING: We cannot put a check here, because the inflation
    // will not update the displaced header.  Once a BasicLock is inflated,
    // no one should ever look at its content.
  } else {
    // Typically the displaced header will be 0 (recursive stack lock) or
    // unused_mark.  Naively we'd like to assert that the displaced mark
    // value is either 0, neutral, or 3.  But with the advent of the
    // store-before-CAS avoidance in fast_lock/compiler_lock_object
    // we can find any flavor mark in the displaced mark.
  }
  // [RGV] The next line appears to do nothing!
  intptr_t dh = (intptr_t) displaced_header();
  dest->set_displaced_header(displaced_header());
}

// -----------------------------------------------------------------------------

// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}

// -----------------------------------------------------------------------------


PerfCounter * ObjectSynchronizer::_sync_Inflations            = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Deflations            = NULL ;
PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ;
PerfCounter * ObjectSynchronizer::_sync_FutileWakeups         = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Parks                 = NULL ;
PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications    = NULL ;
PerfCounter * ObjectSynchronizer::_sync_Notifications         = NULL ;
PerfCounter * ObjectSynchronizer::_sync_PrivateA              = NULL ;
PerfCounter * ObjectSynchronizer::_sync_PrivateB              = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowExit              = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowEnter             = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowNotify            = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll         = NULL ;
PerfCounter * ObjectSynchronizer::_sync_FailedSpins           = NULL ;
PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins       = NULL ;
PerfCounter * ObjectSynchronizer::_sync_MonInCirculation      = NULL ;
PerfCounter * ObjectSynchronizer::_sync_MonScavenged          = NULL ;
PerfLongVariable * ObjectSynchronizer::_sync_MonExtant        = NULL ;

// One-shot global initialization for the sync subsystem.
// We could also defer initialization and initialize on-demand
// the first time we call inflate().  Initialization would
// be protected - like so many things - by the MonitorCache_lock.

void ObjectSynchronizer::Initialize () {
  static int InitializationCompleted = 0 ;
  assert (InitializationCompleted == 0, "invariant") ;
  InitializationCompleted = 1 ;
  if (UsePerfData) {
      EXCEPTION_MARK ;
      #define NEWPERFCOUNTER(n)  {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
      #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
      NEWPERFCOUNTER(_sync_Inflations) ;
      NEWPERFCOUNTER(_sync_Deflations) ;
      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
      NEWPERFCOUNTER(_sync_FutileWakeups) ;
      NEWPERFCOUNTER(_sync_Parks) ;
      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
      NEWPERFCOUNTER(_sync_Notifications) ;
      NEWPERFCOUNTER(_sync_SlowEnter) ;
      NEWPERFCOUNTER(_sync_SlowExit) ;
      NEWPERFCOUNTER(_sync_SlowNotify) ;
      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
      NEWPERFCOUNTER(_sync_FailedSpins) ;
      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
      NEWPERFCOUNTER(_sync_PrivateA) ;
      NEWPERFCOUNTER(_sync_PrivateB) ;
      NEWPERFCOUNTER(_sync_MonInCirculation) ;
      NEWPERFCOUNTER(_sync_MonScavenged) ;
      NEWPERFVARIABLE(_sync_MonExtant) ;
      #undef NEWPERFCOUNTER
  }
}

// Compile-time asserts
// When possible, it's better to catch errors deterministically at
// compile-time than at runtime.  The down-side to using compile-time
// asserts is that the error messages -- often something about negative
// array indices -- are opaque.

#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @%X\n", tag); }

void ObjectMonitor::ctAsserts() {
  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
}

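// Illustrative only -- a hedged sketch (not compiled) of the same trick
// expressed as a typedef, which fails the build on a negative array size
// without emitting any runtime code; CTASSERT2 is a hypothetical name:
#if 0
#define CTASSERT2(x) typedef int ctassert2_type [(x) ? 1 : -1]
CTASSERT2(sizeof(intptr_t) == sizeof(void*)) ;
#endif
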
static int Adjust (volatile int * adr, int dx) {
  int v ;
  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  return v ;
}

// Ad-hoc mutual exclusion primitives: SpinLock and Mux
//
// We employ SpinLocks _only for low-contention, fixed-length
// short-duration critical sections where we're concerned
// about native mutex_t or HotSpot Mutex:: latency.
// The mux construct provides a spin-then-block mutual exclusion
// mechanism.
//
// Testing has shown that contention on the ListLock guarding gFreeList
// is common.  If we implement ListLock as a simple SpinLock it's common
// for the JVM to devolve to yielding with little progress.  This is true
// despite the fact that the critical sections protected by ListLock are
// extremely short.
//
// TODO-FIXME: ListLock should be of type SpinLock.
// We should make this a 1st-class type, integrated into the lock
// hierarchy as leaf-locks.  Critically, the SpinLock structure
// should have sufficient padding to avoid false-sharing and excessive
// cache-coherency traffic.


typedef volatile int SpinLockT ;

void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
  if (Atomic::cmpxchg (1, adr, 0) == 0) {
     return ;   // normal fast-path return
  }

  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  TEVENT (SpinAcquire - ctx) ;
  int ctr = 0 ;
  int Yields = 0 ;
  for (;;) {
     while (*adr != 0) {
        ++ctr ;
        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
           if (Yields > 5) {
             // Consider using a simple NakedSleep() instead.
             // Then SpinAcquire could be called by non-JVM threads
             Thread::current()->_ParkEvent->park(1) ;
           } else {
             os::NakedYield() ;
             ++Yields ;
           }
        } else {
           SpinPause() ;
        }
     }
     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
  }
}

void Thread::SpinRelease (volatile int * adr) {
  assert (*adr != 0, "invariant") ;
  OrderAccess::fence() ;      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  *adr = 0 ;
}


// muxAcquire and muxRelease:
//
// *  muxAcquire and muxRelease support a single-word lock-word construct.
//    The LSB of the word is set IFF the lock is held.
//    The remainder of the word points to the head of a singly-linked list
//    of threads blocked on the lock.
//
// *  The current implementation of muxAcquire-muxRelease uses its own
//    dedicated Thread._MuxEvent instance.  If we're interested in
//    minimizing the peak number of extant ParkEvent instances then
//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
//    as certain invariants were satisfied.  Specifically, care would need
//    to be taken with regards to consuming unpark() "permits".
//    A safe rule of thumb is that a thread would never call muxAcquire()
//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
//    consume an unpark() permit intended for monitorenter, for instance.
//    One way around this would be to widen the restricted-range semaphore
//    implemented in park().  Another alternative would be to provide
//    multiple instances of the PlatformEvent() for each thread.  One
//    instance would be dedicated to muxAcquire-muxRelease, for instance.
//
// *  Usage:
//    -- Only as leaf locks
//    -- for short-term locking only as muxAcquire does not perform
//       thread state transitions.
//
// Alternatives:
// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
//    but with parking or spin-then-park instead of pure spinning.
// *  Use Taura-Oyama-Yonezawa locks.
// *  It's possible to construct a 1-0 lock if we encode the lockword as
//    (List,LockByte).  Acquire will CAS the full lockword while Release
//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
//    acquiring threads use timers (ParkTimed) to detect and recover from
//    the stranding window.  Thread/Node structures must be aligned on 256-byte
//    boundaries by using placement-new.  (A sketch of the 1-0 release appears
//    after the lock-word definitions below).
// *  Augment MCS with advisory back-link fields maintained with CAS().
//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
//    The validity of the backlinks must be ratified before we trust the value.
//    If the backlinks are invalid the exiting thread must back-track through
//    the forward links, which are always trustworthy.
// *  Add a successor indication.  The LockWord is currently encoded as
//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
//    to provide the usual futile-wakeup optimization.
//    See RTStt for details.
// *  Consider schedctl.sc_nopreempt to cover the critical section.
//


typedef volatile intptr_t MutexT ;      // Mux Lock-word
enum MuxBits { LOCKBIT = 1 } ;

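// Illustrative only -- hedged sketches, not part of the build.  First, helpers
// that decode the mux lock-word described above; second, a minimal sketch of
// the 1-0 release mentioned in the Alternatives list, assuming the LockByte is
// the low-order byte of the word (i.e., a little-endian layout):
#if 0
static inline bool muxIsLocked (intptr_t w)    { return (w & LOCKBIT) != 0 ; }
static inline ParkEvent * muxHead (intptr_t w) { return (ParkEvent *) (w & ~LOCKBIT) ; }

static void muxRelease10 (volatile intptr_t * Lock) {
  // 1-0 exit protocol: a single byte store of 0 releases the lock -- no CAS.
  // Stranding is possible, so acquiring threads would need timed parks.
  *((volatile unsigned char *) Lock) = 0 ;
}
#endif
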

void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
     return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * const Self = Thread::current()->_MuxEvent ;
  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
     int its = (os::is_MP() ? 100 : 0) + 1 ;

     // Optional spin phase: spin-then-park strategy
     while (--its >= 0) {
       w = *Lock ;
       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          return ;
       }
     }

     Self->reset() ;
     Self->OnList = intptr_t(Lock) ;
     // The following fence() isn't _strictly necessary as the subsequent
     // CAS() both serializes execution and ratifies the fetched *Lock value.
     OrderAccess::fence();
     for (;;) {
        w = *Lock ;
        if ((w & LOCKBIT) == 0) {
            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
                Self->OnList = 0 ;   // hygiene - allows stronger asserts
                return ;
            }
            continue ;      // Interference -- *Lock changed -- Just retry
        }
        assert (w & LOCKBIT, "invariant") ;
        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
     }

     while (Self->OnList != 0) {
        Self->park() ;
     }
  }
}

void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
    return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * ReleaseAfter = NULL ;
  if (ev == NULL) {
    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
  }
  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
    guarantee (ev->OnList == 0, "invariant") ;
    int its = (os::is_MP() ? 100 : 0) + 1 ;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
        if (ReleaseAfter != NULL) {
          ParkEvent::Release (ReleaseAfter) ;
        }
        return ;
      }
    }

    ev->reset() ;
    ev->OnList = intptr_t(Lock) ;
    // The following fence() isn't _strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0) {
        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          ev->OnList = 0 ;
          // We call ::Release while holding the outer lock, thus
          // artificially lengthening the critical section.
          // Consider deferring the ::Release() until the subsequent unlock(),
          // after we've dropped the outer lock.
          if (ReleaseAfter != NULL) {
            ParkEvent::Release (ReleaseAfter) ;
          }
          return ;
        }
        continue ;      // Interference -- *Lock changed -- Just retry
      }
      assert (w & LOCKBIT, "invariant") ;
      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
    }

    while (ev->OnList != 0) {
      ev->park() ;
    }
  }
}


// Release() must extract a successor from the list and then wake that thread.
// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
// similar to that used by ParkEvent::Allocate() and ::Release().  A DMR-based
// Release() would :
// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
// (B) Extract a successor from the private list "in-hand".
// (C) Attempt to CAS() the residual back into *Lock over null.
//     If there were any newly arrived threads, the CAS() would fail.
//     In that case Release() would detach the RATs, re-merge the list in-hand
//     with the RATs and repeat as needed.  Alternately, Release() might
//     detach and extract a successor, but then pass the residual list to the wakee.
//     The wakee would be responsible for reattaching and remerging before it
//     competed for the lock.
//
// Both "pop" and DMR are immune from ABA corruption -- there can be
// multiple concurrent pushers, but only one popper or detacher.
// This implementation pops from the head of the list.  This is unfair,
// but tends to provide excellent throughput as hot threads remain hot.
// (We wake recently run threads first).

void Thread::muxRelease (volatile intptr_t * Lock) {
  for (;;) {
    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
    assert (w & LOCKBIT, "invariant") ;
    if (w == LOCKBIT) return ;
    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
    assert (List != NULL, "invariant") ;
    assert (List->OnList == intptr_t(Lock), "invariant") ;
    ParkEvent * nxt = List->ListNext ;

    // The following CAS() releases the lock and pops the head element.
    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
      continue ;
    }
    List->OnList = 0 ;
    OrderAccess::fence() ;
    List->unpark () ;
    return ;
  }
}


// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//
// TODO-FIXME:
//
// *  We currently protect the gFreeList with a simple lock.
//    An alternate lock-free scheme would be to pop elements from the gFreeList
//    with CAS.  This would be safe from ABA corruption as long as we only
//    recycled previously appearing elements onto the list in deflate_idle_monitors()
//    at STW-time.  Completely new elements could always be pushed onto the gFreeList
//    with CAS.  Elements that appeared previously on the list could only
//    be installed at STW-time.  (See the sketch following these declarations).
//
// *  For efficiency and to help reduce the store-before-CAS penalty
//    the objectmonitors on gFreeList or local free lists should be ready to install
//    with the exception of _header and _object.  _object can be set after inflation.
//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
//    state with m.Owner set properly.
//
// *  We could also diffuse contention by using multiple global (FreeList, Lock)
//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
//    an unlocked free list.
//
// *  Add lifecycle tags and assert()s.
//
// *  Be more consistent about when we clear an objectmonitor's fields:
//    A.  After extracting the objectmonitor from a free list.
//    B.  After adding an objectmonitor to a free list.
//

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
#define CHAINMARKER ((oop)-1)

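// Illustrative only -- a minimal, hedged sketch (not compiled) of the lock-free
// pop mentioned in the TODO above.  It is safe from ABA only under the stated
// assumption that previously-circulated monitors are re-pushed solely at STW-time;
// popFromFreeList is a hypothetical name:
#if 0
static ObjectMonitor * popFromFreeList () {
  for (;;) {
    ObjectMonitor * head = ObjectSynchronizer::gFreeList ;
    if (head == NULL) return NULL ;
    ObjectMonitor * nxt = head->FreeNext ;
    // Attempt to swing gFreeList from head to head->FreeNext.
    if (Atomic::cmpxchg_ptr (nxt, &ObjectSynchronizer::gFreeList, head) == head) {
      return head ;
    }
    // Interference -- another thread pushed or popped -- just retry.
  }
}
#endif
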
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors.  Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;
            continue ;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
        }

        // Format the block.
        // Initialize the linked list: each monitor points to its successor,
        // forming the singly linked free list.  The very first monitor
        // will point to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered.  A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand.  This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        gBlockList = temp;

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// In the future the usage of omRelease() might change and monitors
// could migrate between free lists.  In that case to avoid excessive
// accumulation we could limit omCount to (omProvision*2), otherwise return
// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
// That is, *not* one-at-a-time.


void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
    guarantee (m->object() == NULL, "invariant") ;
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush (Thread * Self) {
    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
    Self->omFreeList = NULL ;
    if (List == NULL) return ;
    ObjectMonitor * Tail = NULL ;
    ObjectMonitor * s ;
    for (s = List ; s != NULL ; s = s->FreeNext) {
        Tail = s ;
        guarantee (s->object() == NULL, "invariant") ;
        guarantee (!s->is_busy(), "invariant") ;
        s->set_owner (NULL) ;   // redundant but good hygiene
        TEVENT (omFlush - Move one) ;
    }

    guarantee (Tail != NULL && List != NULL, "invariant") ;
    Thread::muxAcquire (&ListLock, "omFlush") ;
    Tail->FreeNext = gFreeList ;
    gFreeList = List ;
    Thread::muxRelease (&ListLock) ;
    TEVENT (omFlush) ;
}


// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

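// Illustrative only -- a hedged sketch (not compiled) of walking every extant
// monitor using next() above; forEachMonitor is a hypothetical helper.  Note
// that element [0] of each block is the reserved linkage header and is skipped:
#if 0
static void forEachMonitor (void (*f)(ObjectMonitor *)) {
  for (ObjectMonitor * block = ObjectSynchronizer::gBlockList ;
       block != NULL ; block = next(block)) {
    for (int i = 1 ; i < _BLOCKSIZE ; i++) {
      f (block + i) ;
    }
  }
}
#endif
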
// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy.  A more complete solution would require that the inflating
         // thread hold the associated inflation lock.  The following code simply restricts
         // the number of spinners to at most one.  We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer.  Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
         // and calling park().  When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;     // SMP-polite spinning
    }
  }
}


ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  for (;;) {
      const markOop mark = object->mark() ;
      assert (!mark->has_bias_pattern(), "invariant") ;

      // The mark can be in one of the following states:
      // *  Inflated     - just return
      // *  Stack-locked - coerce it to inflated
      // *  INFLATING    - busy wait for conversion to complete
      // *  Neutral      - aggressively inflate the object.
      // *  BIASED       - Illegal.  We should never see this

      // CASE: inflated
      if (mark->has_monitor()) {
          ObjectMonitor * inf = mark->monitor() ;
          assert (inf->header()->is_neutral(), "invariant");
          assert (inf->object() == object, "invariant") ;
          assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
          return inf ;
      }

      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markOopDesc::INFLATING()) {
         TEVENT (Inflate: spin while INFLATING) ;
         ReadStableMark(object) ;
         continue ;
      }

      // CASE: stack-locked
      // Could be stack-locked either by this thread or by some other thread.
      //
      // Note that we allocate the objectmonitor speculatively, _before_ attempting
      // to install INFLATING into the mark word.  We originally installed INFLATING,
      // allocated the objectmonitor, and then finally STed the address of the
      // objectmonitor into the mark.  This was correct, but artificially lengthened
      // the interval in which INFLATED appeared in the mark, thus increasing
      // the odds of inflation contention.
      //
      // We now use per-thread private objectmonitor free lists.
      // These lists are reprovisioned from the global free list outside the
      // critical INFLATING...ST interval.  A thread can transfer
      // multiple objectmonitors en-mass from the global free list to its local free list.
      // This reduces coherency traffic and lock contention on the global free list.
      // Using such local free lists, it doesn't matter if the omAlloc() call appears
      // before or after the CAS(INFLATING) operation.
      // See the comments in omAlloc().

      if (mark->has_locker()) {
          ObjectMonitor * m = omAlloc (Self) ;
          // Optimistically prepare the objectmonitor - anticipate successful CAS
          // We do this before the CAS in order to minimize the length of time
          // in which INFLATING appears in the mark.
          m->Recycle();
          m->FreeNext      = NULL ;
          m->_Responsible  = NULL ;
          m->OwnerIsThread = 0 ;
          m->_recursions   = 0 ;
          m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class

          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
          if (cmp != mark) {
             omRelease (Self, m) ;
             continue ;       // Interference -- just retry
          }

          // We've successfully installed INFLATING (0) into the mark-word.
          // This is the only case where 0 will appear in a mark-word.
          // Only the singular thread that successfully swings the mark-word
          // to 0 can perform (or more precisely, complete) inflation.
          //
          // Why do we CAS a 0 into the mark-word instead of just CASing the
          // mark-word from the stack-locked value directly to the new inflated state?
          // Consider what happens when a thread unlocks a stack-locked object.
          // It attempts to use CAS to swing the displaced header value from the
          // on-stack basiclock back into the object header.  Recall also that the
          // header value (hashcode, etc) can reside in (a) the object header, or
          // (b) a displaced header associated with the stack-lock, or (c) a displaced
          // header in an objectMonitor.  The inflate() routine must copy the header
          // value from the basiclock on the owner's stack to the objectMonitor, all
          // the while preserving the hashCode stability invariants.  If the owner
          // decides to release the lock while the value is 0, the unlock will fail
          // and control will eventually pass from slow_exit() to inflate.  The owner
          // will then spin, waiting for the 0 value to disappear.  Put another way,
          // the 0 causes the owner to stall if the owner happens to try to
          // drop the lock (restoring the header from the basiclock to the object)
          // while inflation is in-progress.  This protocol avoids races that might
          // otherwise permit hashCode values to change or "flicker" for an object.
          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
          // 0 serves as a "BUSY" inflate-in-progress indicator.


          // fetch the displaced mark from the owner's stack.
          // The owner can't die or unwind past the lock while our INFLATING
          // object is in the mark.  Furthermore the owner can't complete
          // an unlock on the object, either.
          markOop dmw = mark->displaced_mark_helper() ;
          assert (dmw->is_neutral(), "invariant") ;

          // Setup monitor fields to proper values -- prepare the monitor
          m->set_header(dmw) ;

          // Optimization: if the mark->locker stack address is associated
          // with this thread we could simply set m->_owner = Self and
          // m->OwnerIsThread = 1.  Note that a thread can inflate an object
          // that it has stack-locked -- as might happen in wait() -- directly
          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
          m->set_owner (mark->locker());
          m->set_object(object);
          // TODO-FIXME: assert BasicLock->dhw != 0.

          // Must preserve store ordering.  The monitor state must
          // be stable at the time of publishing the monitor address.
          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
          object->release_set_mark(markOopDesc::encode(m));

          // Hopefully the performance counters are allocated on distinct cache lines
          // to avoid false sharing on MP systems ...
          if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
          TEVENT(Inflate: overwrite stacklock) ;
          if (TraceMonitorInflation) {
            if (object->is_instance()) {
              ResourceMark rm;
              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                (intptr_t) object, (intptr_t) object->mark(),
                Klass::cast(object->klass())->external_name());
            }
          }
          return m ;
      }

      // CASE: neutral
      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
      // If we know we're inflating for entry it's better to inflate by swinging a
      // pre-locked objectMonitor pointer into the object header.  A successful
      // CAS inflates the object *and* confers ownership to the inflating thread.
      // In the current implementation we use a 2-step mechanism where we CAS()
      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
      // An inflateTry() method that we could call from fast_enter() and slow_enter()
      // would be useful.  (A rough sketch follows).

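      // Illustrative only -- a hedged, commented-out sketch of the inflateTry()
      // idea above; inflateTry is a hypothetical name and the error path is elided:
      //
      //   ObjectMonitor * m = omAlloc (Self) ;
      //   m->Recycle() ;
      //   m->set_header (mark) ;           // mark is known to be neutral here
      //   m->set_owner  (Self) ;           // pre-locked: a winning CAS confers ownership
      //   m->set_object (object) ;
      //   if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
      //      return m ;                    // inflated *and* entered in one step
      //   }
      //   // interference: scrub m, omRelease (Self, m), fall back to the 2-step path
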
      assert (mark->is_neutral(), "invariant");
      ObjectMonitor * m = omAlloc (Self) ;
      // prepare m for installation - set monitor to initial state
      m->Recycle();
      m->set_header(mark);
      m->set_owner(NULL);
      m->set_object(object);
      m->OwnerIsThread = 1 ;
      m->_recursions   = 0 ;
      m->FreeNext      = NULL ;
      m->_Responsible  = NULL ;
      m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class

      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
          m->set_object (NULL) ;
          m->set_owner  (NULL) ;
          m->OwnerIsThread = 0 ;
          m->Recycle() ;
          omRelease (Self, m) ;
          m = NULL ;
          continue ;
          // interference - the markword changed - just retry.
          // The state-transitions are one-way, so there's no chance of
          // live-lock -- "Inflated" is an absorbing state.
      }

      // Hopefully the performance counters are allocated on distinct
      // cache lines to avoid false sharing on MP systems ...
      if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
      TEVENT(Inflate: overwrite neutral) ;
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
            (intptr_t) object, (intptr_t) object->mark(),
            Klass::cast(object->klass())->external_name());
        }
      }
      return m ;
  }
}


// This is the fast monitor enter.  The interpreter and compilers emit
// assembly copies of this code.  If you change anything in the function
// below, make sure to update that code as well.  The implementation is
// extremely sensitive to race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  THREAD->update_highest_lock((address)lock);
  slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.  Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
     monitor->exit(THREAD);
  }
}

// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter it.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (An illustrative usage sketch follows reenter(), below.)
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}

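// Illustrative usage sketch for the complete_exit()/reenter() protocol
// described above.  This is NOT VM code: wait_on_inner() is a hypothetical
// caller, the caller is assumed to own both locks on entry, and exception
// paths are omitted for brevity.
#if 0
static void wait_on_inner (Handle lock1, Handle lock2, TRAPS) {
  // 1) Give up lock1 entirely, remembering its recursion count.
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2)+3) Wait on lock2 -- wait() releases and re-acquires lock2 internally.
  ObjectSynchronizer::wait(lock2, 0, THREAD);
  // 4) Restore lock1 with its original recursion count.
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
}
#endif
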
// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741.  Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects.  However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint.  The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
              "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor.  We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();                  // by current thread, check if the displaced
    if (hash) {                           // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases.  So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock.  The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.  If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// It queries the ownership of the lock object specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self.  If no
// thread owns the lock, it returns owner_none.  Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}

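// Illustrative caller sketch for query_lock_ownership() above.  This is NOT
// VM code; report_ownership() is a hypothetical function shown only to make
// the three-way return value concrete.
#if 0
static void report_ownership (JavaThread * self, Handle h_obj) {
  switch (ObjectSynchronizer::query_lock_ownership (self, h_obj)) {
    case ObjectSynchronizer::owner_self  : /* current thread holds the lock */ break ;
    case ObjectSynchronizer::owner_other : /* another thread holds the lock */ break ;
    case ObjectSynchronizer::owner_none  : /* the object is unlocked        */ break ;
  }
}
#endif
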
// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
private:
  TRAPS;

public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv ;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
// Alternately, we could partition the active monitors into sub-lists
// of those that need scanning and those that do not.
// Specifically, we would add a new sub-list of objectmonitors
// that are in-circulation and potentially active.  deflate_idle_monitors()
// would scan only that list.  Other monitors could reside on a quiescent
// list.  Such sequestered monitors wouldn't need to be scanned by
// deflate_idle_monitors().  omAlloc() would first check the global free list,
// then the quiescent list, and, failing those, would allocate a new block.
// Deflate_idle_monitors() would scavenge and move monitors to the
// quiescent list.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
//
// Another refinement would be to refrain from calling deflate_idle_monitors()
// except at stop-the-world points associated with garbage collections.
//
// An even better solution would be to deflate on-the-fly, aggressively,
// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0 ;              // currently associated with objects
  int nInCirculation = 0 ;      // extant
  int nScavenged = 0 ;          // reclaimed

  ObjectMonitor * FreeHead = NULL ;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL ;

  // Iterate over all extant monitors - Scavenge all idle monitors.
  TEVENT (deflate_idle_monitors) ;
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE ;
    for (int i = 1 ; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should either be on a thread-specific private
        // free list or on the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee (!mid->is_busy(), "invariant") ;
        continue ;
      }

      // Normal case ... The monitor is associated with obj.
      guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
      guarantee (mid == obj->mark()->monitor(), "invariant");
      guarantee (mid->header()->is_neutral(), "invariant");

      if (mid->is_busy()) {
         if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
         nInuse ++ ;
      } else {
         // Deflate the monitor if it is no longer being used
         // It's idle - scavenge and return to the global free list
         // plain old deflation ...
         TEVENT (deflate_idle_monitors - scavenge1) ;
         if (TraceMonitorInflation) {
           if (obj->is_instance()) {
             ResourceMark rm;
             tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                           (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
           }
         }

         // Restore the header back to obj
         obj->release_set_mark(mid->header());
         mid->clear();

         assert (mid->object() == NULL, "invariant") ;

         // Move the object to the working free list defined by FreeHead,FreeTail.
         mid->FreeNext = NULL ;
         if (FreeHead == NULL) FreeHead = mid ;
         if (FreeTail != NULL) FreeTail->FreeNext = mid ;
         FreeTail = mid ;
         nScavenged ++ ;
      }
    }
  }

  // Move the scavenged monitors back to the global free list.
  // In theory we don't need the freelist lock as we're at a STW safepoint.
  // omAlloc() and omFree() can only be called while a thread is not at a safepoint.
  // But it's remotely possible that omFlush() or release_monitors_owned_by_thread()
  // might be called while not at a global STW safepoint.  In the interest of
  // safety we protect the following access with ListLock.
  // An even more conservative and prudent approach would be to guard
  // the main loop in deflate_idle_monitors() with ListLock.
  if (FreeHead != NULL) {
     guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
     assert (FreeTail->FreeNext == NULL, "invariant") ;
     // constant-time list splice - prepend scavenged segment to gFreeList
     Thread::muxAcquire (&ListLock, "scavenge - return") ;
     FreeTail->FreeNext = gFreeList ;
     gFreeList = FreeHead ;
     Thread::muxRelease (&ListLock) ;
  }

  if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
  if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random() ;
  GVars.stwCycle ++ ;
}

// A macro is used below because there may already be a pending
// exception which should not abort the execution of the routines
// which use this (which is why we don't put this into check_slow and
// call it with a CHECK argument).

#define CHECK_OWNER()                                                       \
  do {                                                                      \
    if (THREAD != _owner) {                                                 \
      if (THREAD->is_lock_owned((address) _owner)) {                        \
        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */ \
        _recursions = 0;                                                    \
        OwnerIsThread = 1 ;                                                 \
      } else {                                                              \
        TEVENT (Throw IMSX) ;                                               \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
      }                                                                     \
    }                                                                       \
  } while (false)

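// Illustrative use of CHECK_OWNER() above.  This is NOT VM code: the method
// name is hypothetical and the body is elided; it only shows the intended
// call pattern at the top of an ObjectMonitor member that requires ownership.
#if 0
void ObjectMonitor::some_owner_only_op (TRAPS) {
  CHECK_OWNER() ;   // throws IllegalMonitorStateException if not the owner
  // ... operate on the monitor knowing _owner == THREAD ...
}
#endif
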
// TODO-FIXME: eliminate ObjectWaiters.  Replace this visitor/enumerator
// interface with a simple FirstWaitingThread(), NextWaitingThread() interface.

ObjectWaiter* ObjectMonitor::first_waiter() {
  return _WaitSet;
}

ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
  return o->_next;
}

Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
  return o->_thread;
}

// initialize the monitor, except for the semaphore; all other fields
// are simple integers or pointers
ObjectMonitor::ObjectMonitor() {
  _header       = NULL;
  _count        = 0;
  _waiters      = 0;
  _recursions   = 0;
  _object       = NULL;
  _owner        = NULL;
  _WaitSet      = NULL;
  _WaitSetLock  = 0 ;
  _Responsible  = NULL ;
  _succ         = NULL ;
  _cxq          = NULL ;
  FreeNext      = NULL ;
  _EntryList    = NULL ;
  _SpinFreq     = 0 ;
  _SpinClock    = 0 ;
  OwnerIsThread = 0 ;
}

ObjectMonitor::~ObjectMonitor() {
  // TODO: Add asserts ...
  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
  // _count == 0 _EntryList == NULL etc
}

intptr_t ObjectMonitor::is_busy() const {
  // TODO-FIXME: merge _count and _waiters.
  // TODO-FIXME: assert _owner == null implies _recursions = 0
  // TODO-FIXME: assert _WaitSet != null implies _count > 0
  return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
}

void ObjectMonitor::Recycle () {
  // TODO: add stronger asserts ...
  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
  // _count == 0 EntryList == NULL
  // _recursions == 0 _WaitSet == NULL
  // TODO: assert (is_busy()|_recursions) == 0
  _succ         = NULL ;
  _EntryList    = NULL ;
  _cxq          = NULL ;
  _WaitSet      = NULL ;
  _recursions   = 0 ;
  _SpinFreq     = 0 ;
  _SpinClock    = 0 ;
  OwnerIsThread = 0 ;
}

// WaitSet management ...

inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not add NULL node");
  assert(node->_prev == NULL, "node already in list");
  assert(node->_next == NULL, "node already in list");
  // put node at end of queue (circular doubly linked list)
  if (_WaitSet == NULL) {
    _WaitSet = node;
    node->_prev = node;
    node->_next = node;
  } else {
    ObjectWaiter* head = _WaitSet ;
    ObjectWaiter* tail = head->_prev;
    assert(tail->_next == head, "invariant check");
    tail->_next = node;
    head->_prev = node;
    node->_next = head;
    node->_prev = tail;
  }
}

inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
  // dequeue the very first waiter
  ObjectWaiter* waiter = _WaitSet;
  if (waiter) {
    DequeueSpecificWaiter(waiter);
  }
  return waiter;
}

inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not dequeue NULL node");
  assert(node->_prev != NULL, "node already removed from list");
  assert(node->_next != NULL, "node already removed from list");
  // when the waiter has woken up because of interrupt,
  // timeout or other spurious wake-up, dequeue the
  // waiter from waiting list
  ObjectWaiter* next = node->_next;
  if (next == node) {
    assert(node->_prev == node, "invariant check");
    _WaitSet = NULL;
  } else {
    ObjectWaiter* prev = node->_prev;
    assert(prev->_next == node, "invariant check");
    assert(next->_prev == node, "invariant check");
    next->_prev = prev;
    prev->_next = next;
    if (_WaitSet == node) {
      _WaitSet = next;
    }
  }
  node->_next = NULL;
  node->_prev = NULL;
}

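// Illustrative debug-only sketch (NOT VM code) of the circular doubly linked
// list invariant maintained by AddWaiter()/DequeueSpecificWaiter() above.
// The function name is hypothetical.
#if 0
static bool WaitSetIsWellFormed (ObjectWaiter * head) {
  if (head == NULL) return true ;       // an empty WaitSet is trivially valid
  ObjectWaiter * p = head ;
  do {
    // every node's neighbors must point back at it
    if (p->_next->_prev != p || p->_prev->_next != p) return false ;
    p = p->_next ;
  } while (p != head) ;
  return true ;
}
#endif
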
static char * kvGet (char * kvList, const char * Key) {
    if (kvList == NULL) return NULL ;
    size_t n = strlen (Key) ;
    char * Search ;
    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
        if (strncmp (Search, Key, n) == 0) {
            if (Search[n] == '=') return Search + n + 1 ;
            if (Search[n] == 0)   return (char *) "1" ;
        }
    }
    return NULL ;
}

static int kvGetInt (char * kvList, const char * Key, int Default) {
    char * v = kvGet (kvList, Key) ;
    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
    if (Knob_ReportSettings && v != NULL) {
        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
        ::fflush (stdout) ;
    }
    return rslt ;
}

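// Illustrative sketch (NOT VM code) of the kvList format consumed by kvGet()
// and kvGetInt() above: NUL-separated "key=value" or bare "key" entries,
// terminated by an empty string.  The buffer contents are assumptions chosen
// purely for demonstration.
#if 0
static void kvDemo () {
  // Two entries: "SpinLimit=5000" and the bare boolean flag "Verbose".
  // The literal's implicit terminator supplies the trailing empty string.
  char kvList [] = "SpinLimit=5000\0Verbose\0" ;
  char * v  = kvGet    (kvList, "SpinLimit") ;      // -> "5000"
  int lim   = kvGetInt (kvList, "SpinLimit", 0) ;   // -> 5000
  int verb  = kvGetInt (kvList, "Verbose", 0) ;     // bare key -> "1" -> 1
  int other = kvGetInt (kvList, "Missing", 42) ;    // absent key -> Default
}
#endif
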
// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
    assert (_owner == Self, "invariant") ;
    assert (SelfNode->_thread == Self, "invariant") ;

    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
        // Normal case: remove Self from the DLL EntryList .
        // This is a constant-time operation.
        ObjectWaiter * nxt = SelfNode->_next ;
        ObjectWaiter * prv = SelfNode->_prev ;
        if (nxt != NULL) nxt->_prev = prv ;
        if (prv != NULL) prv->_next = nxt ;
        if (SelfNode == _EntryList ) _EntryList = nxt ;
        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        TEVENT (Unlink from EntryList) ;
    } else {
        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
        // Inopportune interleaving -- Self is still on the cxq.
        // This usually means the enqueue of self raced an exiting thread.
        // Normally we'll find Self near the front of the cxq, so
        // dequeueing is typically fast.  If need be we can accelerate
        // this with some MCS/CHL-like bidirectional list hints and advisory
        // back-links so dequeueing from the interior will normally operate
        // in constant-time.
        // Dequeue Self from either the head (with CAS) or from the interior
        // with a linear-time scan and normal non-atomic memory operations.
        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
        // and then unlink Self from EntryList.  We have to drain eventually,
        // so it might as well be now.

        ObjectWaiter * v = _cxq ;
        assert (v != NULL, "invariant") ;
        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
            // The CAS above can fail from interference IFF a "RAT" arrived.
            // In that case Self must be in the interior and can no longer be
            // at the head of cxq.
            if (v == SelfNode) {
                assert (_cxq != v, "invariant") ;
                v = _cxq ;          // CAS above failed - start scan at head of list
            }
            ObjectWaiter * p ;
            ObjectWaiter * q = NULL ;
            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
                q = p ;
                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
            }
            assert (v != SelfNode, "invariant") ;
            assert (p == SelfNode, "Node not found on cxq") ;
            assert (p != _cxq,     "invariant") ;
            assert (q != NULL,     "invariant") ;
            assert (q->_next == p, "invariant") ;
            q->_next = p->_next ;
        }
        TEVENT (Unlink from cxq) ;
    }

    // Diagnostic hygiene ...
    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
}

// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock (Thread * Self) {
   for (;;) {
      void * own = _owner ;
      if (own != NULL) return 0 ;
      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
         // Either guarantee _recursions == 0 or set _recursions = 0.
         assert (_recursions == 0, "invariant") ;
         assert (_owner == Self, "invariant") ;
         // CONSIDER: set or assert that OwnerIsThread == 1
         return 1 ;
      }
      // The lock had been free momentarily, but we lost the race to the lock.
      // Interference -- the CAS failed.
      // We can either return -1 or retry.
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
   }
}

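// Illustrative caller sketch (NOT VM code) showing how TryLock()'s tri-state
// return value is typically consumed; the handling shown is an assumption.
#if 0
void TryLockDemo (ObjectMonitor * m, Thread * Self) {
  int rc = m->TryLock (Self) ;
  if (rc > 0) {
    // acquired -- proceed into the critical section
  } else if (rc == 0) {
    // _owner was non-NULL -- lock is held; consider spinning or parking
  } else {
    // rc < 0: the CAS failed -- contention; usually treated like failure,
    // but per the caveat above it is not a serializing observation
  }
}
#endif
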
// NotRunnable() -- informed spinning
//
// Don't bother spinning if the owner is not eligible to drop the lock.
// Peek at the owner's schedctl.sc_state and Thread._thread_state and
// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
// The thread must be runnable in order to drop the lock in timely fashion.
// If the _owner is not runnable then spinning will not likely be
// successful (profitable).
//
// Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
// Because of the lifecycle issues the schedctl and _thread_state values
// observed by NotRunnable() might be garbage.  NotRunnable must
// tolerate this and consider the observed _thread_state value
// as advisory.
//
// Beware too, that _owner is sometimes a BasicLock address and sometimes
// a thread pointer.  We differentiate the two cases with OwnerIsThread.
// Alternately, we might tag the type (thread pointer vs basiclock pointer)
// with the LSB of _owner.  Another option would be to probabilistically probe
// the putative _owner->TypeTag value.
//
// Checking _thread_state isn't perfect.  Even if the thread is
// in_java it might be blocked on a page-fault or have been preempted
// and sitting on a ready/dispatch queue.  _thread_state in conjunction
// with schedctl.sc_state gives us a good picture of what the
// thread is doing, however.
//
// TODO: check schedctl.sc_state.
// We'll need to use SafeFetch32() to read from the schedctl block.
// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
//
// The return value from NotRunnable() is *advisory* -- the
// result is based on sampling and is not necessarily coherent.
// The caller must tolerate false-negative and false-positive errors.
// Spinning, in general, is probabilistic anyway.


int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
    if (!OwnerIsThread) return 0 ;

    if (ox == NULL) return 0 ;

    // Avoid transitive spinning ...
    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
    // Immediately after T1 acquires L it's possible that T2, also
    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
    // This occurs transiently after T1 acquired L but before
    // T1 managed to clear T1.Stalled.  T2 does not need to abort
    // its spin in this circumstance.
    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;

    if (BlockedOn == 1) return 1 ;
    if (BlockedOn != 0) {
      return BlockedOn != intptr_t(this) && _owner == ox ;
    }

    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
    // consider also: jst != _thread_in_Java -- but that's overspecific.
    return jst == _thread_blocked || jst == _thread_in_native ;
}


// Adaptive spin-then-block - rational spinning
//
// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
// algorithm.  On high order SMP systems it would be better to start with
// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
// a contending thread could enqueue itself on the cxq and then spin locally
// on a thread-specific variable such as its ParkEvent._Event flag.
// That's left as an exercise for the reader.  Note that global spinning is
// not problematic on Niagara, as the L2$ serves the interconnect and has both
// low latency and massive bandwidth.
//
// Broadly, we can fix the spin frequency -- that is, the % of contended lock
// acquisition attempts where we opt to spin -- at 100% and vary the spin count
// (duration) or we can fix the count at approximately the duration of
// a context switch and vary the frequency.  Of course we could also
// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
//
// This implementation varies the duration "D", where D varies with
// the success rate of recent spin attempts.  (D is capped at approximately
// the length of a round-trip context switch).  The success rate for recent
// spin attempts is a good predictor of the success rate of future spin
// attempts.  The mechanism adapts automatically to varying critical
// section length (lock modality), system load and degree of parallelism.
// D is maintained per-monitor in _SpinDuration and is initialized
// optimistically.  Spin frequency is fixed at 100%.
//
// Note that _SpinDuration is volatile, but we update it without locks
// or atomics.  The code is designed so that _SpinDuration stays within
// a reasonable range even in the presence of races.  The arithmetic
// operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install an older but still legal value.
// At the very worst this introduces some apparent non-determinism.
// We might spin when we shouldn't or vice-versa, but since the spin
// counts are relatively short, even in the worst case, the effect is harmless.
//
// Care must be taken that a low "D" value does not become an
// absorbing state.  Transient spinning failures -- when spinning
// is overall profitable -- should not cause the system to converge
// on low "D" values.  We want spinning to be stable and predictable
// and fairly responsive to change and at the same time we don't want
// it to oscillate, become metastable, be "too" non-deterministic,
// or converge on or enter undesirable stable absorbing states.
//
// We implement a feedback-based control system -- using past behavior
// to predict future behavior.  We face two issues: (a) if the
// input signal is random then the spin predictor won't provide optimal
// results, and (b) if the signal frequency is too high then the control
// system, which has some natural response lag, will "chase" the signal.
// (b) can arise from multimodal lock hold times.  Transient preemption
// can also result in apparent bimodal lock hold times.
// Although sub-optimal, neither condition is particularly harmful, as
// in the worst-case we'll spin when we shouldn't or vice-versa.
// The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain of the system to bias toward
// _not spinning.  Relatedly, the system can sometimes enter a mode where it
// "rings" or oscillates between spinning and not spinning.  This happens
// when spinning is just on the cusp of profitability, however, so the
// situation is not dire.  The state is benign -- there's no need to add
// hysteresis control to damp the transition rate between spinning and
// not spinning.

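// Illustrative model (NOT VM code) of the duration feedback described above.
// Knob_Penalty is an assumed failure-decay constant in the style of the
// Knob_Poverty/Knob_BonusB/Knob_SpinLimit knobs used later in this file.
#if 0
static int AdaptSpinDuration (int D, bool spinSucceeded) {
  if (spinSucceeded) {
    // Reward: raise D toward the limit; hoisting D above the poverty
    // line first keeps low D values from becoming absorbing states.
    if (D < Knob_Poverty) D = Knob_Poverty ;
    D += Knob_BonusB ;
    if (D > Knob_SpinLimit) D = Knob_SpinLimit ;
  } else {
    // Penalize: decay D, but admission control still spins briefly
    // even at D == 0 so the system keeps sampling for change.
    D -= Knob_Penalty ;
    if (D < 0) D = 0 ;
  }
  return D ;
}
#endif
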
2243 //
|
|
2244 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
|
2245 //
|
|
2246 // Spin-then-block strategies ...
|
|
2247 //
|
|
2248 // Thoughts on ways to improve spinning :
|
|
2249 //
|
|
2250 // * Periodically call {psr_}getloadavg() while spinning, and
|
|
2251 // permit unbounded spinning if the load average is <
|
|
2252 // the number of processors. Beware, however, that getloadavg()
|
|
2253 // is exceptionally fast on solaris (about 1/10 the cost of a full
|
|
2254 // spin cycle, but quite expensive on linux. Beware also, that
|
|
2255 // multiple JVMs could "ring" or oscillate in a feedback loop.
|
|
2256 // Sufficient damping would solve that problem.
|
|
2257 //
|
|
2258 // * We currently use spin loops with iteration counters to approximate
|
|
2259 // spinning for some interval. Given the availability of high-precision
|
|
2260 // time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
|
|
2261 // someday reimplement the spin loops to duration-based instead of iteration-based.
|
|
2262 //
|
|
2263 // * Don't spin if there are more than N = (CPUs/2) threads
|
|
2264 // currently spinning on the monitor (or globally).
|
|
2265 // That is, limit the number of concurrent spinners.
|
|
2266 // We might also limit the # of spinners in the JVM, globally.
|
|
2267 //
|
|
2268 // * If a spinning thread observes _owner change hands it should
|
|
2269 // abort the spin (and park immediately) or at least debit
|
|
2270 // the spin counter by a large "penalty".
|
|
2271 //
|
|
2272 // * Classically, the spin count is either K*(CPUs-1) or is a
|
|
2273 // simple constant that approximates the length of a context switch.
|
|
2274 // We currently use a value -- computed by a special utility -- that
|
|
2275 // approximates round-trip context switch times.
|
|
2276 //
|
|
2277 // * Normally schedctl_start()/_stop() is used to advise the kernel
|
|
2278 // to avoid preempting threads that are running in short, bounded
|
|
2279 // critical sections. We could use the schedctl hooks in an inverted
|
|
2280 // sense -- spinners would set the nopreempt flag, but poll the preempt
|
|
2281 // pending flag. If a spinner observed a pending preemption it'd immediately
|
|
2282 // abort the spin and park. As such, the schedctl service acts as
|
|
2283 // a preemption warning mechanism.
|
|
2284 //
|
|
2285 // * In lieu of spinning, if the system is running below saturation
|
|
2286 // (that is, loadavg() << #cpus), we can instead suppress futile
|
|
2287 // wakeup throttling, or even wake more than one successor at exit-time.
|
|
2288 // The net effect is largely equivalent to spinning. In both cases,
|
|
2289 // contending threads go ONPROC and opportunistically attempt to acquire
|
|
2290 // the lock, decreasing lock handover latency at the expense of wasted
|
|
2291 // cycles and context switching.
|
|
2292 //
|
|
2293 // * We might to spin less after we've parked as the thread will
|
|
2294 // have less $ and TLB affinity with the processor.
|
|
2295 // Likewise, we might spin less if we come ONPROC on a different
|
|
2296 // processor or after a long period (>> rechose_interval).
|
|
2297 //
|
|
2298 // * A table-driven state machine similar to Solaris' dispadmin scheduling
|
|
2299 // tables might be a better design. Instead of encoding information in
|
|
2300 // _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
|
|
2301 // discrete states. Success or failure during a spin would drive
|
|
2302 // state transitions, and each state node would contain a spin count.
|
|
2303 //
|
|
2304 // * If the processor is operating in a mode intended to conserve power
|
|
2305 // (such as Intel's SpeedStep) or to reduce thermal output (thermal
|
|
2306 // step-down mode) then the Java synchronization subsystem should
|
|
2307 // forgo spinning.
|
|
2308 //
|
|
2309 // * The minimum spin duration should be approximately the worst-case
|
|
2310 // store propagation latency on the platform. That is, the time
|
|
2311 // it takes a store on CPU A to become visible on CPU B, where A and
|
|
2312 // B are "distant".
|
|
2313 //
|
|
2314 // * We might want to factor a thread's priority in the spin policy.
|
|
2315 // Threads with a higher priority might spin for slightly longer.
|
|
2316 // Similarly, if we use back-off in the TATAS loop, lower priority
|
|
2317 // threads might back-off longer. We don't currently use a
|
|
2318 // thread's priority when placing it on the entry queue. We may
|
|
2319 // want to consider doing so in future releases.
|
|
2320 //
|
|
2321 // * We might transiently drop a thread's scheduling priority while it spins.
|
|
2322 // SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
|
|
2323 // would suffice. We could even consider letting the thread spin indefinitely at
|
|
2324 // a depressed or "idle" priority. This brings up fairness issues, however --
|
|
2325 // in a saturated system a thread would with a reduced priority could languish
|
|
2326 // for extended periods on the ready queue.
|
|
2327 //
|
|
2328 // * While spinning try to use the otherwise wasted time to help the VM make
|
|
2329 // progress:
|
|
2330 //
|
|
2331 // -- YieldTo() the owner, if the owner is OFFPROC but ready
|
|
2332 // Done our remaining quantum directly to the ready thread.
|
|
2333 // This helps "push" the lock owner through the critical section.
|
|
2334 // It also tends to improve affinity/locality as the lock
|
|
2335 // "migrates" less frequently between CPUs.
|
|
2336 // -- Walk our own stack in anticipation of blocking. Memoize the roots.
|
|
2337 // -- Perform strand checking for other thread. Unpark potential strandees.
|
|
2338 // -- Help GC: trace or mark -- this would need to be a bounded unit of work.
|
|
2339 // Unfortunately this will pollute our $ and TLBs. Recall that we
|
|
2340 // spin to avoid context switching -- context switching has an
|
|
2341 // immediate cost in latency, a disruptive cost to other strands on a CMT
|
|
2342 // processor, and an amortized cost because of the D$ and TLB cache
|
|
2343 // reload transient when the thread comes back ONPROC and repopulates
|
|
2344 // $s and TLBs.
|
|
2345 // -- call getloadavg() to see if the system is saturated. It'd probably
|
|
2346 // make sense to call getloadavg() half way through the spin.
|
|
2347 // If the system isn't at full capacity the we'd simply reset
|
|
2348 // the spin counter to and extend the spin attempt.
|
|
2349 // -- Doug points out that we should use the same "helping" policy
|
|
2350 // in thread.yield().
|
|
2351 //
|
|
2352 // * Try MONITOR-MWAIT on systems that support those instructions.
|
|
2353 //
|
|
2354 // * The spin statistics that drive spin decisions & frequency are
|
|
2355 // maintained in the objectmonitor structure so if we deflate and reinflate
|
|
2356 // we lose spin state. In practice this is not usually a concern
|
|
2357 // as the default spin state after inflation is aggressive (optimistic)
|
|
2358 // and tends toward spinning. So in the worst case for a lock where
|
|
2359 // spinning is not profitable we may spin unnecessarily for a brief
|
|
2360 // period. But then again, if a lock is contended it'll tend not to deflate
|
|
2361 // in the first place.
|
|
2362
|
|
2363
|
|
intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;

// Spinning: Fixed frequency (100%), vary duration

2369 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
|
|
2370
|
|
2371 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
|
|
2372 int ctr = Knob_FixedSpin ;
|
|
2373 if (ctr != 0) {
|
|
2374 while (--ctr >= 0) {
|
|
2375 if (TryLock (Self) > 0) return 1 ;
|
|
2376 SpinPause () ;
|
|
2377 }
|
|
2378 return 0 ;
|
|
2379 }
|
|
2380
|
|
2381 for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
|
|
2382 if (TryLock(Self) > 0) {
|
|
2383 // Increase _SpinDuration ...
|
|
2384 // Note that we don't clamp SpinDuration precisely at SpinLimit.
|
|
2385 // Raising _SpurDuration to the poverty line is key.
|
|
2386 int x = _SpinDuration ;
|
|
2387 if (x < Knob_SpinLimit) {
|
|
2388 if (x < Knob_Poverty) x = Knob_Poverty ;
|
|
2389 _SpinDuration = x + Knob_BonusB ;
|
|
2390 }
|
|
2391 return 1 ;
|
|
2392 }
|
|
2393 SpinPause () ;
|
|
2394 }
|
|
2395
|
|
2396 // Admission control - verify preconditions for spinning
|
|
2397 //
|
|
2398 // We always spin a little bit, just to prevent _SpinDuration == 0 from
|
|
2399 // becoming an absorbing state. Put another way, we spin briefly to
|
|
2400 // sample, just in case the system load, parallelism, contention, or lock
|
|
2401 // modality changed.
|
|
2402 //
|
|
2403 // Consider the following alternative:
|
|
2404 // Periodically set _SpinDuration = _SpinLimit and try a long/full
|
|
2405 // spin attempt. "Periodically" might mean after a tally of
|
|
2406 // the # of failed spin attempts (or iterations) reaches some threshold.
|
|
2407 // This takes us into the realm of 1-out-of-N spinning, where we
|
|
2408 // hold the duration constant but vary the frequency.
|
|
2409
|
|
2410 ctr = _SpinDuration ;
|
|
2411 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
|
|
2412 if (ctr <= 0) return 0 ;
|
|
2413
|
|
2414 if (Knob_SuccRestrict && _succ != NULL) return 0 ;
|
|
2415 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
|
|
2416 TEVENT (Spin abort - notrunnable [TOP]);
|
|
2417 return 0 ;
|
|
2418 }
|
|
2419
|
|
2420 int MaxSpin = Knob_MaxSpinners ;
|
|
2421 if (MaxSpin >= 0) {
|
|
2422 if (_Spinner > MaxSpin) {
|
|
2423 TEVENT (Spin abort -- too many spinners) ;
|
|
2424 return 0 ;
|
|
2425 }
|
|
2426 // Slighty racy, but benign ...
|
|
2427 Adjust (&_Spinner, 1) ;
|
|
2428 }
|
|
2429
|
|

    // We're good to spin ... spin ingress.
    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
    // when preparing to LD...CAS _owner, etc and the CAS is likely
    // to succeed.
    int hits   = 0 ;
    int msk    = 0 ;
    int caspty = Knob_CASPenalty ;
    int oxpty  = Knob_OXPenalty ;
    int sss    = Knob_SpinSetSucc ;
    if (sss && _succ == NULL ) _succ = Self ;
    Thread * prv = NULL ;

    // There are three ways to exit the following loop:
    // 1.  A successful spin where this thread has acquired the lock.
    // 2.  Spin failure with prejudice
    // 3.  Spin failure without prejudice

    while (--ctr >= 0) {

        // Periodic polling -- Check for pending GC
        // Threads may spin while they're unsafe.
        // We don't want spinning threads to delay the JVM from reaching
        // a stop-the-world safepoint or to steal cycles from GC.
        // If we detect a pending safepoint we abort in order that
        // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
        // this thread, if safe, doesn't steal cycles from GC.
        // This is in keeping with the "no loitering in runtime" rule.
        // We periodically check to see if there's a safepoint pending.
        if ((ctr & 0xFF) == 0) {
            if (SafepointSynchronize::do_call_back()) {
                TEVENT (Spin: safepoint) ;
                goto Abort ;            // abrupt spin egress
            }
            if (Knob_UsePause & 1) SpinPause () ;

            int (*scb)(intptr_t,int) = SpinCallbackFunction ;
            if (hits > 50 && scb != NULL) {
                int abend = (*scb)(SpinCallbackArgument, 0) ;
            }
        }

        if (Knob_UsePause & 2) SpinPause() ;

        // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
        // This is useful on classic SMP systems, but is of less utility on
        // N1-style CMT platforms.
        //
        // Trade-off: lock acquisition latency vs coherency bandwidth.
        // Lock hold times are typically short.  A histogram
        // of successful spin attempts shows that we usually acquire
        // the lock early in the spin.  That suggests we want to
        // sample _owner frequently in the early phase of the spin,
        // but then back-off and sample less frequently as the spin
        // progresses.  The back-off makes us a good citizen on big
        // SMP systems.  Oversampling _owner can consume excessive
        // coherency bandwidth.  Relatedly, if we oversample _owner we
        // can inadvertently interfere with the ST m->owner=null
        // executed by the lock owner.
        if (ctr & msk) continue ;
        ++hits ;
        if ((hits & 0xF) == 0) {
            // The 0xF, above, corresponds to the exponent.
            // Consider: (msk+1)|msk
            msk = ((msk << 2)|3) & BackOffMask ;
        }
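        // Worked example: with Knob_SpinBackOff == 4, BackOffMask == 0xF and
        // msk evolves 0 -> 0x3 -> 0xF and then saturates.  Because the probe
        // below is gated by (ctr & msk), we sample _owner on every iteration
        // at first, then roughly 1 in 4, then 1 in 16 iterations as the spin ages.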

        // Probe _owner with TATAS
        // If this thread observes the monitor transition or flicker
        // from locked to unlocked to locked, then the odds that this
        // thread will acquire the lock in this spin attempt go down
        // considerably.  The same argument applies if the CAS fails
        // or if we observe _owner change from one non-null value to
        // another non-null value.  In such cases we might abort
        // the spin without prejudice or apply a "penalty" to the
        // spin count-down variable "ctr", reducing it by 100, say.

        Thread * ox = (Thread *) _owner ;
        if (ox == NULL) {
            ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
            if (ox == NULL) {
                // The CAS succeeded -- this thread acquired ownership
                // Take care of some bookkeeping to exit spin state.
                if (sss && _succ == Self) {
                    _succ = NULL ;
                }
                if (MaxSpin > 0) Adjust (&_Spinner, -1) ;

                // Increase _SpinDuration :
                // The spin was successful (profitable) so we tend toward
                // longer spin attempts in the future.
                // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
                // If we acquired the lock early in the spin cycle it
                // makes sense to increase _SpinDuration proportionally.
                // Note that we don't clamp SpinDuration precisely at SpinLimit.
                int x = _SpinDuration ;
                if (x < Knob_SpinLimit) {
                    if (x < Knob_Poverty) x = Knob_Poverty ;
                    _SpinDuration = x + Knob_Bonus ;
                }
                return 1 ;
            }

            // The CAS failed ... we can take any of the following actions:
            // * penalize: ctr -= Knob_CASPenalty
            // * exit spin with prejudice -- goto Abort;
            // * exit spin without prejudice.
            // * Since CAS is high-latency, retry immediately.
            prv = ox ;
            TEVENT (Spin: cas failed) ;
            if (caspty == -2) break ;
            if (caspty == -1) goto Abort ;
            ctr -= caspty ;
            continue ;
        }

        // Did lock ownership change hands ?
        if (ox != prv && prv != NULL ) {
            TEVENT (spin: Owner changed)
            if (oxpty == -2) break ;
            if (oxpty == -1) goto Abort ;
            ctr -= oxpty ;
        }
        prv = ox ;

        // Abort the spin if the owner is not executing.
        // The owner must be executing in order to drop the lock.
        // Spinning while the owner is OFFPROC is idiocy.
        // Consider: ctr -= RunnablePenalty ;
        if (Knob_OState && NotRunnable (Self, ox)) {
            TEVENT (Spin abort - notrunnable);
            goto Abort ;
        }
        if (sss && _succ == NULL ) _succ = Self ;
    }

    // Spin failed with prejudice -- reduce _SpinDuration.
    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
    // AIMD is globally stable.
    TEVENT (Spin failure) ;
    {
        int x = _SpinDuration ;
        if (x > 0) {
            // Consider an AIMD scheme like: x -= (x >> 3) + 100
            // This is globally stable and tends to damp the response.
            x -= Knob_Penalty ;
            if (x < 0) x = 0 ;
            _SpinDuration = x ;
        }
    }

 Abort:
    if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
    if (sss && _succ == Self) {
        _succ = NULL ;
        // Invariant: after setting succ=null a contending thread
        // must recheck-retry _owner before parking.  This usually happens
        // in the normal usage of TrySpin(), but it's safest
        // to make TrySpin() as foolproof as possible.
        OrderAccess::fence() ;
        if (TryLock(Self) > 0) return 1 ;
    }
    return 0 ;
}

#define TrySpin TrySpin_VaryDuration

static void DeferredInitialize () {
  if (InitDone > 0) return ;
  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
      while (InitDone != 1) ;
      return ;
  }

  // One-shot global initialization ...
  // The initialization is idempotent, so we don't need locks.
  // In the future consider doing this via os::init_2().
  // SyncKnobs consist of <Key>=<Value> pairs in the style
  // of environment variables.  Start by converting ':' to NUL.
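  // For example (illustrative only), a knob string such as
  //    "SpinLimit=4096:SpinBase=16:UsePause=3"
  // is copied and rewritten in-place below into the doubly-NUL-terminated
  //    "SpinLimit=4096\0SpinBase=16\0UsePause=3\0\0"
  // which kvGetInt() can then scan one key=value record at a time.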
  if (SyncKnobs == NULL) SyncKnobs = "" ;

  size_t sz = strlen (SyncKnobs) ;
  char * knobs = (char *) malloc (sz + 2) ;
  if (knobs == NULL) {
     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
     guarantee (0, "invariant") ;
  }
  strcpy (knobs, SyncKnobs) ;
  knobs[sz+1] = 0 ;
  for (char * p = knobs ; *p ; p++) {
     if (*p == ':') *p = 0 ;
  }

  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
  SETKNOB(ReportSettings) ;
  SETKNOB(Verbose) ;
  SETKNOB(FixedSpin) ;
  SETKNOB(SpinLimit) ;
  SETKNOB(SpinBase) ;
  SETKNOB(SpinBackOff) ;
  SETKNOB(CASPenalty) ;
  SETKNOB(OXPenalty) ;
  SETKNOB(LogSpins) ;
  SETKNOB(SpinSetSucc) ;
  SETKNOB(SuccEnabled) ;
  SETKNOB(SuccRestrict) ;
  SETKNOB(Penalty) ;
  SETKNOB(Bonus) ;
  SETKNOB(BonusB) ;
  SETKNOB(Poverty) ;
  SETKNOB(SpinAfterFutile) ;
  SETKNOB(UsePause) ;
  SETKNOB(SpinEarly) ;
  SETKNOB(OState) ;
  SETKNOB(MaxSpinners) ;
  SETKNOB(PreSpin) ;
  SETKNOB(ExitPolicy) ;
  SETKNOB(QMode) ;
  SETKNOB(ResetEvent) ;
  SETKNOB(MoveNotifyee) ;
  SETKNOB(FastHSSEC) ;
  #undef SETKNOB

  if (os::is_MP()) {
     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
  } else {
     Knob_SpinLimit = 0 ;
     Knob_SpinBase  = 0 ;
     Knob_PreSpin   = 0 ;
     Knob_FixedSpin = -1 ;
  }

  if (Knob_LogSpins == 0) {
     ObjectSynchronizer::_sync_FailedSpins = NULL ;
  }

  free (knobs) ;
  OrderAccess::fence() ;
  InitDone = 1 ;
}

// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
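//   A minimal sketch of that CAS-based push (the live code is in EnterI(),
//   below):
//
//      ObjectWaiter * nxt ;
//      for (;;) {
//         node._next = nxt = _cxq ;                                   // read current head
//         if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; // swing head to node
//         // Interference: another arriving thread pushed first -- retry.
//      }
//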
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilog) and
//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
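//   A sketch of the detach step, as the lock owner performs it in ::exit()
//   (see the QMode handling below); the loop is tantamount to
//   w = swap (&_cxq, NULL):
//
//      ObjectWaiter * w = _cxq ;
//      for (;;) {
//         ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
//         if (u == w) break ;      // detached the chain headed by w
//         w = u ;                  // lost a race with a pusher -- retry with new head
//      }
//      // ... then fold the detached RATs into the EntryList per the queue discipline.
//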
// Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is unowned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned on 256-byte address boundaries.  A thread
//   would try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's true
//   on most common processors.
//
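//   A rough sketch of that hypothetical encoding (not what this file
//   implements):
//
//      volatile intptr_t _cxqword ;                      // (List,LockByte)
//      #define LOCKBYTE(v) ((v) & (intptr_t) 0xFF)       // 0 iff unowned
//      #define LISTHEAD(v) ((v) & ~((intptr_t) 0xFF))    // 256-byte aligned node
//
//   Enter would CAS the full word to set the LockByte or push a node, while
//   exit would issue a single STB of 0 into the low byte -- the 1-0 store.
//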
// * See also http://blogs.sun.com/dave


void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
        assert (_succ != Self        , "invariant") ;
        assert (_owner == Self       , "invariant") ;
        assert (_Responsible != Self , "invariant") ;
        return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.

    if (TrySpin (Self) > 0) {
        assert (_owner == Self       , "invariant") ;
        assert (_succ != Self        , "invariant") ;
        assert (_Responsible != Self , "invariant") ;
        return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ  != Self       , "invariant") ;
    assert (_owner != Self       , "invariant") ;
    assert (_Responsible != Self , "invariant") ;

    // Enqueue "Self" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for Self.
    // As an aside, if we were ever to rewrite the synchronization code mostly
    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
    // Java objects.  This would avoid awkward lifecycle and liveness issues,
    // as well as eliminate a subset of ABA issues.
    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
    //

    ObjectWaiter node(Self) ;
    Self->_ParkEvent->reset() ;
    node._prev  = (ObjectWaiter *) 0xBAD ;
    node.TState = ObjectWaiter::TS_CXQ ;

    // Push "Self" onto the front of the _cxq.
    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt ;
    for (;;) {
        node._next = nxt = _cxq ;
        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

        // Interference - the CAS failed because _cxq changed.  Just retry.
        // As an optional optimization we retry the lock.
        if (TryLock (Self) > 0) {
            assert (_succ != Self        , "invariant") ;
            assert (_owner == Self       , "invariant") ;
            assert (_Responsible != Self , "invariant") ;
            return ;
        }
    }

    // Check for cxq|EntryList edge transition to non-null.  This indicates
    // the onset of contention.  While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
    // operations revert to the faster 1-0 mode.  This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
    // arrange for one of the contending threads to use timed park() operations
    // to detect and recover from the race.  (Stranding is a form of progress failure
    // where the monitor is unlocked but all the contending threads remain parked).
    // That is, at least one of the contended threads will periodically poll _owner.
    // One of the contending threads will become the designated "Responsible" thread.
    // The Responsible thread uses a timed park instead of a normal indefinite park
    // operation -- it periodically wakes and checks for and recovers from potential
    // strandings admitted by 1-0 exit operations.  We need at most one Responsible
    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
    // be responsible for a monitor.
    //
    // Currently, one of the contended threads takes on the added role of "Responsible".
    // A viable alternative would be to use a dedicated "stranding checker" thread
    // that periodically iterated over all the threads (or active monitors) and unparked
    // successors where there was risk of stranding.  This would help eliminate the
    // timer scalability issues we see on some platforms as we'd only have one thread
    // -- the checker -- parked on a timer.
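    //
    // An illustrative stranding interleaving, for concreteness:
    //    T1 (1-0 exit) : LD cxq|EntryList == null   -- concludes no wakeup is needed
    //    T2 (enter)    : CAS-push Self onto cxq
    //    T2 (enter)    : LD _owner != null          -- T1 still owns the lock, so park
    //    T1 (1-0 exit) : ST _owner = null           -- monitor free, yet T2 stays parked
    // The Responsible thread's timed park bounds how long such a state can persist.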

    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
        // Try to assume the role of responsible thread for the monitor.
        // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // The lock might have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
    // In this case the ST-MEMBAR is accomplished with CAS().
    //
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

    TEVENT (Inflated enter - Contention) ;
    int nWakeups = 0 ;
    int RecheckInterval = 1 ;

    for (;;) {

        if (TryLock (Self) > 0) break ;
        assert (_owner != Self, "invariant") ;

        if ((SyncFlags & 2) && _Responsible == NULL) {
            Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
        }

        // park self
        if (_Responsible == Self || (SyncFlags & 1)) {
            TEVENT (Inflated enter - park TIMED) ;
            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
            // Increase the RecheckInterval, but clamp the value.
            RecheckInterval *= 8 ;
            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
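            // With the *= 8 growth and the clamp, the successive timed-park
            // intervals run 1, 8, 64, 512, 1000, 1000, ... milliseconds.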
        } else {
            TEVENT (Inflated enter - park UNTIMED) ;
            Self->_ParkEvent->park() ;
        }

        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Inflated enter - Futile wakeup) ;
        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
            ObjectSynchronizer::_sync_FutileWakeups->inc() ;
        }
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
        // We can defer clearing _succ until after the spin completes.
        // TrySpin() must tolerate being called with _succ == Self.
        // Try yet another round of adaptive spinning.
        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

        // We can find that we were unpark()ed and redesignated _succ while
        // we were spinning.  That's harmless.  If we iterate and call park(),
        // park() will consume the event and return immediately and we'll
        // just spin again.  This pattern can repeat, leaving _succ to simply
        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
        // Alternately, we can sample fired() here, and if set, forgo spinning
        // in the next iteration.

        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
            Self->_ParkEvent->reset() ;
            OrderAccess::fence() ;
        }
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
        OrderAccess::fence() ;
    }

    // Egress :
    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self   , "invariant") ;
    assert (object() != NULL , "invariant") ;
    // I'd like to write:
    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    // but as we're at a safepoint that's not safe.

    UnlinkAfterAcquire (Self, &node) ;
    if (_succ == Self) _succ = NULL ;

    assert (_succ != Self, "invariant") ;
    if (_Responsible == Self) {
        _Responsible = NULL ;
        // Dekker pivot-point.
        // Consider OrderAccess::storeload() here

        // We may leave threads on cxq|EntryList without a designated
        // "Responsible" thread.  This is benign.  When this thread subsequently
        // exits the monitor it can "see" such preexisting "old" threads --
        // threads that arrived on the cxq|EntryList before the fence, above --
        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
        // non-null and elect a new "Responsible" timer thread.
        //
        // This thread executes:
        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
        //    LD cxq|EntryList               (in subsequent exit)
        //
        // Entering threads in the slow/contended path execute:
        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
        //    The (ST cxq; MEMBAR) is accomplished with CAS().
        //
        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
        // exit operation from floating above the ST Responsible=null.
        //
        // In *practice* however, EnterI() is always followed by some atomic
        // operation such as the decrement of _count in ::enter().  Those atomics
        // obviate the need for the explicit MEMBAR, above.
    }

    // We've acquired ownership with CAS().
    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
    // But since the CAS() this thread may also have stored into _succ,
    // EntryList, cxq or Responsible.  These meta-data updates must be
    // visible *before* this thread subsequently drops the lock.
    // Consider what could occur if we didn't enforce this constraint --
    // STs to monitor meta-data and user-data could reorder with (become
    // visible after) the ST in exit that drops ownership of the lock.
    // Some other thread could then acquire the lock, but observe inconsistent
    // or old monitor meta-data and heap data.  That violates the JMM.
    // To that end, the 1-0 exit() operation must have at least STST|LDST
    // "release" barrier semantics.  Specifically, there must be at least a
    // STST|LDST barrier in exit() before the ST of null into _owner that drops
    // the lock.  The barrier ensures that changes to monitor meta-data and data
    // protected by the lock will be visible before we release the lock, and
    // therefore before some other thread (CPU) has a chance to acquire the lock.
    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
    //
    // Critically, any prior STs to _succ or EntryList must be visible before
    // the ST of null into _owner in the *subsequent* (following) corresponding
    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
    // execute a serializing instruction.

    if (SyncFlags & 8) {
        OrderAccess::fence() ;
    }
    return ;
}

// ExitSuspendEquivalent:
// A faster alternate to handle_special_suspend_equivalent_condition()
//
// handle_special_suspend_equivalent_condition() unconditionally
// acquires the SR_lock.  On some platforms uncontended MutexLocker()
// operations have high latency.  Note that in ::enter() we call HSSEC
// while holding the monitor, so we effectively lengthen the critical sections.
//
// There are a number of possible solutions:
//
// A.  To ameliorate the problem we might also defer state transitions
//     to as late as possible -- just prior to parking.
//     Given that, we'd call HSSEC after having returned from park(),
//     but before attempting to acquire the monitor.  This is only a
//     partial solution.  It avoids calling HSSEC while holding the
//     monitor (good), but it still increases successor reacquisition latency --
//     the interval between unparking a successor and the time the successor
//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
//     If we use this technique we can also avoid the EnterI()-exit() loop
//     in ::enter() where we iteratively drop the lock and then attempt
//     to reacquire it after suspending.
//
// B.  In the future we might fold all the suspend bits into a
//     composite per-thread suspend flag and then update it with CAS().
//     Alternately, a Dekker-like mechanism with multiple variables
//     would suffice:
//       ST Self->_suspend_equivalent = false
//       MEMBAR
//       LD Self->_suspend_flags
//

bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
    int Mode = Knob_FastHSSEC ;
    if (Mode && !jSelf->is_external_suspend()) {
        assert (jSelf->is_suspend_equivalent(), "invariant") ;
        jSelf->clear_suspend_equivalent() ;
        if (2 == Mode) OrderAccess::storeload() ;
        if (!jSelf->is_external_suspend()) return false ;
        // We raced a suspension -- fall thru into the slow path
        TEVENT (ExitSuspendEquivalent - raced) ;
        jSelf->set_suspend_equivalent() ;
    }
    return jSelf->handle_special_suspend_equivalent_condition() ;
}


// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
    assert (Self != NULL              , "invariant") ;
    assert (SelfNode != NULL          , "invariant") ;
    assert (SelfNode->_thread == Self , "invariant") ;
    assert (_waiters > 0              , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;

    int nWakeups = 0 ;
    for (;;) {
        ObjectWaiter::TStates v = SelfNode->TState ;
        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
        assert (_owner != Self, "invariant") ;

        if (TryLock (Self) > 0) break ;
        if (TrySpin (Self) > 0) break ;

        TEVENT (Wait Reentry - parking) ;

        // State transition wrappers around park() ...
        // ReenterI() wisely defers state transitions until
        // it's clear we must park the thread.
        {
            OSThreadContendState osts(Self->osthread());
            ThreadBlockInVM tbivm(jt);

            // cleared by handle_special_suspend_equivalent_condition()
            // or java_suspend_self()
            jt->set_suspend_equivalent();
            if (SyncFlags & 1) {
                Self->_ParkEvent->park ((jlong)1000) ;
            } else {
                Self->_ParkEvent->park () ;
            }

            // were we externally suspended while we were waiting?
            for (;;) {
                if (!ExitSuspendEquivalent (jt)) break ;
                if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
                jt->java_suspend_self();
                jt->set_suspend_equivalent();
            }
        }

        // Try again, but just so we distinguish between futile wakeups and
        // successful wakeups.  The following test isn't algorithmically
        // necessary, but it helps us maintain sensible statistics.
        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Wait Reentry - futile wakeup) ;
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally
        // find that _succ == Self.
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a contending thread
        // *must* retry _owner before parking.
        OrderAccess::fence() ;

        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
            ObjectSynchronizer::_sync_FutileWakeups->inc() ;
        }
    }

    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // Unlinking from the EntryList is constant-time and atomic-free.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self, "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    UnlinkAfterAcquire (Self, SelfNode) ;
    if (_succ == Self) _succ = NULL ;
    assert (_succ != Self, "invariant") ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
    OrderAccess::fence() ;      // see comments at the end of EnterI()
}

bool ObjectMonitor::try_enter(Thread* THREAD) {
    if (THREAD != _owner) {
        if (THREAD->is_lock_owned ((address)_owner)) {
            assert(_recursions == 0, "internal state error");
            _owner = THREAD ;
            _recursions = 1 ;
            OwnerIsThread = 1 ;
            return true;
        }
        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return false;
        }
        return true;
    } else {
        _recursions++;
        return true;
    }
}
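
// A minimal usage sketch (illustrative): try_enter() is a non-blocking
// acquire, so a caller must be prepared for refusal and must pair each
// successful try_enter() with a matching exit():
//
//    if (monitor->try_enter(THREAD)) {
//       // ... critical section ...
//       monitor->exit(THREAD) ;
//    } else {
//       // lock unavailable -- back off or take an alternate path
//    }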

void ATTR ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD ;
    void * cur ;

    cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
    if (cur == NULL) {
        // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
        assert (_recursions == 0, "invariant") ;
        assert (_owner == Self, "invariant") ;
        // CONSIDER: set or assert OwnerIsThread == 1
        return ;
    }

    if (cur == Self) {
        // TODO-FIXME: check for integer overflow!  BUGID 6557169.
        _recursions ++ ;
        return ;
    }

    if (Self->is_lock_owned ((address)cur)) {
        assert (_recursions == 0, "internal state error");
        _recursions = 1 ;
        // Commute owner from a thread-specific on-stack BasicLock address to
        // a full-fledged "Thread *".
        _owner = Self ;
        OwnerIsThread = 1 ;
        return ;
    }
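
    // To summarize the dispatch on "cur" above:
    //    cur == NULL        : CAS succeeded -- uncontended acquire, done.
    //    cur == Self        : recursive enter -- just bump _recursions.
    //    is_lock_owned(cur) : _owner held one of our on-stack BasicLock
    //                         addresses -- convert it to a true Thread *.
    //    otherwise          : genuine contention -- fall through, below.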

    // We've encountered genuine contention.
    assert (Self->_Stalled == 0, "invariant") ;
    Self->_Stalled = intptr_t(this) ;

    // Try one round of spinning *before* enqueueing Self
    // and before going through the awkward and expensive state
    // transitions.  The following spin is strictly optional ...
    // Note that if we acquire the monitor from an initial spin
    // we forgo posting JVMTI events and firing DTRACE probes.
    if (Knob_SpinEarly && TrySpin (Self) > 0) {
        assert (_owner == Self   , "invariant") ;
        assert (_recursions == 0 , "invariant") ;
        assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
        Self->_Stalled = 0 ;
        return ;
    }

    assert (_owner != Self         , "invariant") ;
    assert (_succ  != Self         , "invariant") ;
    assert (Self->is_Java_thread() , "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;
    assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
    assert (jt->thread_state() != _thread_blocked   , "invariant") ;
    assert (this->object() != NULL , "invariant") ;
    assert (_count >= 0, "invariant") ;

    // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
    // Ensure the object-monitor relationship remains stable while there's contention.
    Atomic::inc_ptr(&_count);

    { // Change java thread status to indicate blocked on monitor enter.
        JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

        DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
        if (JvmtiExport::should_post_monitor_contended_enter()) {
            JvmtiExport::post_monitor_contended_enter(jt, this);
        }

        OSThreadContendState osts(Self->osthread());
        ThreadBlockInVM tbivm(jt);

        Self->set_current_pending_monitor(this);

        // TODO-FIXME: change the following for(;;) loop to straight-line code.
        for (;;) {
            jt->set_suspend_equivalent();
            // cleared by handle_special_suspend_equivalent_condition()
            // or java_suspend_self()

            EnterI (THREAD) ;

            if (!ExitSuspendEquivalent(jt)) break ;

            //
            // We have acquired the contended monitor, but while we were
            // waiting another thread suspended us.  We don't want to enter
            // the monitor while suspended because that would surprise the
            // thread that suspended us.
            //
            _recursions = 0 ;
            _succ = NULL ;
            exit (Self) ;

            jt->java_suspend_self();
        }
        Self->set_current_pending_monitor(NULL);
    }

    Atomic::dec_ptr(&_count);
    assert (_count >= 0, "invariant") ;
    Self->_Stalled = 0 ;

    // Must either set _recursions = 0 or ASSERT _recursions == 0.
    assert (_recursions == 0, "invariant") ;
    assert (_owner == Self  , "invariant") ;
    assert (_succ  != Self  , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

    // The thread -- now the owner -- is back in vm mode.
    // Report the glorious news via TI, DTrace and jvmstat.
    // The probe effect is non-trivial.  All the reportage occurs
    // while we hold the monitor, increasing the length of the critical
    // section.  Amdahl's parallel speedup law comes vividly into play.
    //
    // Another option might be to aggregate the events (thread local or
    // per-monitor aggregation) and defer reporting until a more opportune
    // time -- such as next time some thread encounters contention but has
    // yet to acquire the lock.  While spinning, that thread could
    // increment JVMStat counters, etc.

    DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_entered()) {
        JvmtiExport::post_monitor_contended_entered(jt, this);
    }
    if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
        ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
    }
}

void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
    assert (_owner == Self, "invariant") ;

    // Exit protocol:
    // 1. ST _succ = wakee
    // 2. membar #loadstore|#storestore;
    // 3. ST _owner = NULL
    // 4. unpark(wakee)

    _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
    ParkEvent * Trigger = Wakee->_event ;

    // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
    // out-of-scope (non-extant).
    Wakee = NULL ;

    // Drop the lock
    OrderAccess::release_store_ptr (&_owner, NULL) ;
    OrderAccess::fence() ;      // ST _owner vs LD in unpark()

    // TODO-FIXME:
    // If there's a safepoint pending the best policy would be to
    // get _this thread to a safepoint and only wake the successor
    // after the safepoint completed.  monitorexit uses a "leaf"
    // state transition, however, so this thread can't become
    // safe at this point in time.  (Its stack isn't walkable).
    // The next best thing is to defer waking the successor by
    // adding it to a list of threads to be unparked at the
    // end of the forthcoming STW.
    if (SafepointSynchronize::do_call_back()) {
        TEVENT (unpark before SAFEPOINT) ;
    }

    // Possible optimizations ...
    //
    // * Consider: set Wakee->UnparkTime = timeNow()
    //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
    //   By measuring recent ONPROC latency we can approximate the
    //   system load.  In turn, we can feed that information back
    //   into the spinning & succession policies.
    //   (ONPROC latency correlates strongly with load).
    //
    // * Pull affinity:
    //   If the wakee is cold then transiently setting its affinity
    //   to the current CPU is a good idea.
    //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
    Trigger->unpark() ;

    // Maintain stats and report events to JVMTI
    if (ObjectSynchronizer::_sync_Parks != NULL) {
        ObjectSynchronizer::_sync_Parks->inc() ;
    }
    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
}


// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load on the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.

void ATTR ObjectMonitor::exit(TRAPS) {
    Thread * Self = THREAD ;
    if (THREAD != _owner) {
        if (THREAD->is_lock_owned((address) _owner)) {
            // Transmute _owner from a BasicLock pointer to a Thread address.
            // We don't need to hold _mutex for this transition.
            // Non-null to Non-null is safe as long as all readers can
            // tolerate either flavor.
            assert (_recursions == 0, "invariant") ;
            _owner = THREAD ;
            _recursions = 0 ;
            OwnerIsThread = 1 ;
        } else {
            // NOTE: we need to handle unbalanced monitor enter/exit
            // in native code by throwing an exception.
            // TODO: Throw an IllegalMonitorStateException ?
            TEVENT (Exit - Throw IMSX) ;
            assert(false, "Non-balanced monitor enter/exit!");
            if (false) {
                THROW(vmSymbols::java_lang_IllegalMonitorStateException());
            }
            return;
        }
    }

    if (_recursions != 0) {
        _recursions--;          // this is simple recursive enter
        TEVENT (Inflated exit - recursive) ;
        return ;
    }

    // Invariant: after setting Responsible=null a thread must execute
    // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
    if ((SyncFlags & 4) == 0) {
        _Responsible = NULL ;
    }

    for (;;) {
        assert (THREAD == _owner, "invariant") ;

        // Fast-path monitor exit:
        //
        // Observe the Dekker/Lamport duality:
        // A thread in ::exit() executes:
        //    ST Owner=null; MEMBAR; LD EntryList|cxq.
        // A thread in the contended ::enter() path executes the complementary:
        //    ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
        //
        // Note that there's a benign race in the exit path.  We can drop the
        // lock, another thread can reacquire the lock immediately, and we can
        // then wake a thread unnecessarily (yet another flavor of futile wakeup).
        // This is benign, and we've structured the code so the windows are short
        // and the frequency of such futile wakeups is low.
        //
        // We could eliminate the race by encoding both the "LOCKED" state and
        // the queue head in a single word.  Exit would then use CAS to
        // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
        // however.
        //
        // Possible fast-path ::exit() optimization:
        // The current fast-path exit implementation fetches both cxq and EntryList.
        // See also i486.ad fast_unlock().  Testing has shown that two LDs
        // aren't measurably slower than a single LD on any platform.
        // Still, we could reduce the 2 LDs to one or zero by one of the following:
        //
        // - Use _count instead of cxq|EntryList.
        //   We intend to eliminate _count, however, when we switch
        //   to on-the-fly deflation in ::exit() as is used in
        //   Metalocks and RelaxedLocks.
        //
        // - Establish the invariant that cxq == null implies EntryList == null.
        //   Set cxq == EMPTY (1) to encode the state where cxq is empty
        //   but EntryList != null.  EMPTY is a distinguished value.
        //   The fast-path exit() would fetch cxq but not EntryList.
        //
        // - Encode succ as follows:
        //   succ = t :  Thread t is the successor -- t is ready or is spinning.
        //               Exiting thread does not need to wake a successor.
        //   succ = 0 :  No successor required -> (EntryList|cxq) == null
        //               Exiting thread does not need to wake a successor.
        //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
        //               logically succ == null.
        //               Exiting thread must wake a successor.
        //
        //   The 1-1 fast-exit path would appear as :
        //      _owner = null ; membar ;
        //      if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
        //      goto FastPathDone ;
        //
        //   and the 1-0 fast-exit path would appear as:
        //      if (_succ == 1) goto SlowPath
        //      Owner = null ;
        //      goto FastPathDone
        //
        // - Encode the LSB of _owner as 1 to indicate that exit()
        //   must use the slow-path and make a successor ready.
        //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
        //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
        //   The 1-0 fast exit path would read:
        //      if (_owner != Self) goto SlowPath
        //      _owner = null
        //      goto FastPathDone

        if (Knob_ExitPolicy == 0) {
            // release semantics: prior loads and stores from within the critical section
            // must not float (reorder) past the following store that drops the lock.
            // On SPARC that requires MEMBAR #loadstore|#storestore.
            // But of course in TSO #loadstore|#storestore is not required.
            // I'd like to write one of the following:
            // A.  OrderAccess::release() ; _owner = NULL
            // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
            // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
            // store into a _dummy variable.  That store is not needed, but can result
            // in massive wasteful coherency traffic on classic SMP systems.
            // Instead, I use release_store(), which is implemented as just a simple
            // ST on x64, x86 and SPARC.
            OrderAccess::release_store_ptr (&_owner, NULL) ;    // drop the lock
            OrderAccess::storeload() ;                          // See if we need to wake a successor
            if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }
            TEVENT (Inflated exit - complex egress) ;

            // Normally the exiting thread is responsible for ensuring succession,
            // but if other successors are ready or other entering threads are spinning
            // then this thread can simply store NULL into _owner and exit without
            // waking a successor.  The existence of spinners or ready successors
            // guarantees proper succession (liveness).  Responsibility passes to the
            // ready or running successors.  The exiting thread delegates the duty.
            // More precisely, if a successor already exists this thread is absolved
            // of the responsibility of waking (unparking) one.
            //
            // The _succ variable is critical to reducing futile wakeup frequency.
            // _succ identifies the "heir presumptive" thread that has been made
            // ready (unparked) but that has not yet run.  We need only one such
            // successor thread to guarantee progress.
            // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
            // section 3.3 "Futile Wakeup Throttling" for details.
            //
            // Note that spinners in Enter() also set _succ non-null.
            // In the current implementation spinners opportunistically set
            // _succ so that exiting threads might avoid waking a successor.
            // Another less appealing alternative would be for the exiting thread
            // to drop the lock and then spin briefly to see if a spinner managed
            // to acquire the lock.  If so, the exiting thread could exit
            // immediately without waking a successor, otherwise the exiting
            // thread would need to dequeue and wake a successor.
            // (Note that we'd need to make the post-drop spin short, but no
            // shorter than the worst-case round-trip cache-line migration time.
            // The dropped lock needs to become visible to the spinner, and then
            // the acquisition of the lock by the spinner must become visible to
            // the exiting thread).
            //

            // It appears that an heir-presumptive (successor) must be made ready.
            // Only the current lock owner can manipulate the EntryList or
            // drain _cxq, so we need to reacquire the lock.  If we fail
            // to reacquire the lock the responsibility for ensuring succession
            // falls to the new owner.
            //
            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
                return ;
            }
            TEVENT (Exit - Reacquired) ;
        } else {
            if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
                OrderAccess::release_store_ptr (&_owner, NULL) ;    // drop the lock
                OrderAccess::storeload() ;
                // Ratify the previously observed values.
                if (_cxq == NULL || _succ != NULL) {
                    TEVENT (Inflated exit - simple egress) ;
                    return ;
                }

                // inopportune interleaving -- the exiting thread (this thread)
                // in the fast-exit path raced an entering thread in the slow-enter
                // path.
                // We have two choices:
                // A.  Try to reacquire the lock.
                //     If the CAS() fails return immediately, otherwise
                //     we either restart/rerun the exit operation, or simply
                //     fall-through into the code below which wakes a successor.
                // B.  If the elements forming the EntryList|cxq are TSM
                //     we could simply unpark() the lead thread and return
                //     without having set _succ.
                if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
                    TEVENT (Inflated exit - reacquired succeeded) ;
                    return ;
                }
                TEVENT (Inflated exit - reacquired failed) ;
            } else {
                TEVENT (Inflated exit - complex egress) ;
            }
        }

        guarantee (_owner == THREAD, "invariant") ;
3622 // Select an appropriate successor ("heir presumptive") from the EntryList
|
|
3623 // and make it ready. Generally we just wake the head of EntryList .
|
|
3624 // There's no algorithmic constraint that we use the head - it's just
|
|
3625 // a policy decision. Note that the thread at head of the EntryList
|
|
3626 // remains at the head until it acquires the lock. This means we'll
|
|
3627 // repeatedly wake the same thread until it manages to grab the lock.
|
|
3628 // This is generally a good policy - if we're seeing lots of futile wakeups
|
|
3629 // at least we're waking/rewaking a thread that's likely to be hot or warm
|
|
3630 // (have residual D$ and TLB affinity).
|
|
3631 //
|
|
3632 // "Wakeup locality" optimization:
|
|
3633 // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
|
|
3634 // In the future we'll try to bias the selection mechanism
|
|
3635 // to preferentially pick a thread that recently ran on
|
|
3636 // a processor element that shares cache with the CPU on which
|
|
3637 // the exiting thread is running. We need access to Solaris'
|
|
3638 // schedctl.sc_cpu to make that work.
|
|
3639 //
|
|
3640 ObjectWaiter * w = NULL ;
|
|
3641 int QMode = Knob_QMode ;
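// For reference, the QMode queue disciplines implemented by the code
// below (summarized from the branches that follow; 0 is the default):
//   QMode == 2 : wake the head of cxq directly; cxq has precedence over EntryList.
//   QMode == 3 : drain cxq and append it to the tail of the EntryList.
//   QMode == 4 : drain cxq and prepend it to the head of the EntryList.
//   QMode == 1 : drain cxq into the EntryList, reversing (LIFO -> FIFO) order.
//   QMode == 0 : drain cxq into the EntryList, preserving cxq order.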
|
|
3642
|
|
3643 if (QMode == 2 && _cxq != NULL) {
|
|
3644 // QMode == 2 : cxq has precedence over EntryList.
|
|
3645 // Try to directly wake a successor from the cxq.
|
|
3646 // If successful, the successor will need to unlink itself from cxq.
|
|
3647 w = _cxq ;
|
|
3648 assert (w != NULL, "invariant") ;
|
|
3649 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
|
|
3650 ExitEpilog (Self, w) ;
|
|
3651 return ;
|
|
3652 }
|
|
3653
|
|
3654 if (QMode == 3 && _cxq != NULL) {
|
|
3655 // Aggressively drain cxq into EntryList at the first opportunity.
|
|
3656 // This policy ensures that recently-run threads live at the head of EntryList.
|
|
3657 // Drain _cxq into EntryList - bulk transfer.
|
|
3658 // First, detach _cxq.
|
|
3659 // The following loop is tantamount to: w = swap (&cxq, NULL)
|
|
3660 w = _cxq ;
|
|
3661 for (;;) {
|
|
3662 assert (w != NULL, "Invariant") ;
|
|
3663 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
|
|
3664 if (u == w) break ;
|
|
3665 w = u ;
|
|
3666 }
|
|
3667 assert (w != NULL , "invariant") ;
|
|
3668
|
|
3669 ObjectWaiter * q = NULL ;
|
|
3670 ObjectWaiter * p ;
|
|
3671 for (p = w ; p != NULL ; p = p->_next) {
|
|
3672 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
|
|
3673 p->TState = ObjectWaiter::TS_ENTER ;
|
|
3674 p->_prev = q ;
|
|
3675 q = p ;
|
|
3676 }
|
|
3677
|
|
3678 // Append the RATs (recently-arrived threads) to the EntryList
|
|
3679 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
|
|
3680 ObjectWaiter * Tail ;
|
|
3681 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
|
|
3682 if (Tail == NULL) {
|
|
3683 _EntryList = w ;
|
|
3684 } else {
|
|
3685 Tail->_next = w ;
|
|
3686 w->_prev = Tail ;
|
|
3687 }
|
|
3688
|
|
3689 // Fall thru into code that tries to wake a successor from EntryList
|
|
3690 }
|
|
3691
|
|
3692 if (QMode == 4 && _cxq != NULL) {
|
|
3693 // Aggressively drain cxq into EntryList at the first opportunity.
|
|
3694 // This policy ensures that recently-run threads live at the head of EntryList.
|
|
3695
|
|
3696 // Drain _cxq into EntryList - bulk transfer.
|
|
3697 // First, detach _cxq.
|
|
3698 // The following loop is tantamount to: w = swap (&cxq, NULL)
|
|
3699 w = _cxq ;
|
|
3700 for (;;) {
|
|
3701 assert (w != NULL, "Invariant") ;
|
|
3702 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
|
|
3703 if (u == w) break ;
|
|
3704 w = u ;
|
|
3705 }
|
|
3706 assert (w != NULL , "invariant") ;
|
|
3707
|
|
3708 ObjectWaiter * q = NULL ;
|
|
3709 ObjectWaiter * p ;
|
|
3710 for (p = w ; p != NULL ; p = p->_next) {
|
|
3711 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
|
|
3712 p->TState = ObjectWaiter::TS_ENTER ;
|
|
3713 p->_prev = q ;
|
|
3714 q = p ;
|
|
3715 }
|
|
3716
|
|
3717 // Prepend the RATs to the EntryList
|
|
3718 if (_EntryList != NULL) {
|
|
3719 q->_next = _EntryList ;
|
|
3720 _EntryList->_prev = q ;
|
|
3721 }
|
|
3722 _EntryList = w ;
|
|
3723
|
|
3724 // Fall thru into code that tries to wake a successor from EntryList
|
|
3725 }
|
|
3726
|
|
3727 w = _EntryList ;
|
|
3728 if (w != NULL) {
|
|
3729 // I'd like to write: guarantee (w->_thread != Self).
|
|
3730 // But in practice an exiting thread may find itself on the EntryList.
|
|
3731 // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
|
|
3732 // then calls exit(). Exit releases the lock by setting O._owner to NULL.
|
|
3733 // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
|
|
3734 // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
|
|
3735 // releases the lock "O". T1 resumes immediately after the ST of null into
|
|
3736 // _owner, above. T1 notices that the EntryList is populated, so it
|
|
3737 // reacquires the lock and then finds itself on the EntryList.
|
|
3738 // Given all that, we have to tolerate the circumstance where "w" is
|
|
3739 // associated with Self.
|
|
3740 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
|
3741 ExitEpilog (Self, w) ;
|
|
3742 return ;
|
|
3743 }
|
|
3744
|
|
3745 // If we find that both _cxq and EntryList are null then just
|
|
3746 // re-run the exit protocol from the top.
|
|
3747 w = _cxq ;
|
|
3748 if (w == NULL) continue ;
|
|
3749
|
|
3750 // Drain _cxq into EntryList - bulk transfer.
|
|
3751 // First, detach _cxq.
|
|
3752 // The following loop is tantamount to: w = swap (&cxq, NULL)
|
|
3753 for (;;) {
|
|
3754 assert (w != NULL, "Invariant") ;
|
|
3755 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
|
|
3756 if (u == w) break ;
|
|
3757 w = u ;
|
|
3758 }
|
|
3759 TEVENT (Inflated exit - drain cxq into EntryList) ;
|
|
3760
|
|
3761 assert (w != NULL , "invariant") ;
|
|
3762 assert (_EntryList == NULL , "invariant") ;
|
|
3763
|
|
3764 // Convert the LIFO SLL anchored by _cxq into a DLL.
|
|
3765 // The list reorganization step operates in O(LENGTH(w)) time.
|
|
3766 // It's critical that this step operate quickly as
|
|
3767 // "Self" still holds the outer-lock, restricting parallelism
|
|
3768 // and effectively lengthening the critical section.
|
|
3769 // Invariant: s chases t chases u.
|
|
3770 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
|
|
3771 // we have faster access to the tail.
|
|
3772
|
|
3773 if (QMode == 1) {
|
|
3774 // QMode == 1 : drain cxq to EntryList, reversing the order of the list.
|
|
3775 // Since cxq is a LIFO stack, the reversal restores arrival (FIFO) order.
|
|
3776 ObjectWaiter * s = NULL ;
|
|
3777 ObjectWaiter * t = w ;
|
|
3778 ObjectWaiter * u = NULL ;
|
|
3779 while (t != NULL) {
|
|
3780 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
|
|
3781 t->TState = ObjectWaiter::TS_ENTER ;
|
|
3782 u = t->_next ;
|
|
3783 t->_prev = u ;
|
|
3784 t->_next = s ;
|
|
3785 s = t;
|
|
3786 t = u ;
|
|
3787 }
|
|
3788 _EntryList = s ;
|
|
3789 assert (s != NULL, "invariant") ;
|
|
3790 } else {
|
|
3791 // QMode == 0 or QMode == 2
|
|
3792 _EntryList = w ;
|
|
3793 ObjectWaiter * q = NULL ;
|
|
3794 ObjectWaiter * p ;
|
|
3795 for (p = w ; p != NULL ; p = p->_next) {
|
|
3796 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
|
|
3797 p->TState = ObjectWaiter::TS_ENTER ;
|
|
3798 p->_prev = q ;
|
|
3799 q = p ;
|
|
3800 }
|
|
3801 }
|
|
3802
|
|
3803 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
|
|
3804 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
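// Spelled out as a sketch (illustrative only -- in the actual code the
// release_store() inside ExitEpilog() supplies the #storestore barrier):
//
//   _EntryList = w ;                                  // ST EntryList
//   OrderAccess::release_store_ptr (&_owner, NULL) ;  // MEMBAR #storestore; ST _owner = NULL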
|
|
3805
|
|
3806 // See if we can abdicate to a spinner instead of waking a thread.
|
|
3807 // A primary goal of the implementation is to reduce the
|
|
3808 // context-switch rate.
|
|
3809 if (_succ != NULL) continue;
|
|
3810
|
|
3811 w = _EntryList ;
|
|
3812 if (w != NULL) {
|
|
3813 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
|
3814 ExitEpilog (Self, w) ;
|
|
3815 return ;
|
|
3816 }
|
|
3817 }
|
|
3818 }
|
|
3819 // complete_exit exits a lock, returning the recursion count
|
|
3820 // complete_exit/reenter operate as a wait without waiting
|
|
3821 // complete_exit requires an inflated monitor
|
|
3822 // The _owner field is not always the Thread addr even with an
|
|
3823 // inflated monitor, e.g. the monitor can be inflated by a non-owning
|
|
3824 // thread due to contention.
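//
// Illustrative caller sketch (hypothetical code; "mon" and the surrounding
// caller are assumptions, not part of this file):
//
//   intptr_t rec = mon->complete_exit (THREAD) ;   // fully release, remember recursions
//   // ... run without holding the monitor ...
//   mon->reenter (rec, THREAD) ;                   // reacquire and restore recursions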
|
|
3825 intptr_t ObjectMonitor::complete_exit(TRAPS) {
|
|
3826 Thread * const Self = THREAD;
|
|
3827 assert(Self->is_Java_thread(), "Must be Java thread!");
|
|
3828 JavaThread *jt = (JavaThread *)THREAD;
|
|
3829
|
|
3830 DeferredInitialize();
|
|
3831
|
|
3832 if (THREAD != _owner) {
|
|
3833 if (THREAD->is_lock_owned ((address)_owner)) {
|
|
3834 assert(_recursions == 0, "internal state error");
|
|
3835 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
|
|
3836 _recursions = 0 ;
|
|
3837 OwnerIsThread = 1 ;
|
|
3838 }
|
|
3839 }
|
|
3840
|
|
3841 guarantee(Self == _owner, "complete_exit not owner");
|
|
3842 intptr_t save = _recursions; // record the old recursion count
|
|
3843 _recursions = 0; // set the recursion level to be 0
|
|
3844 exit (Self) ; // exit the monitor
|
|
3845 guarantee (_owner != Self, "invariant");
|
|
3846 return save;
|
|
3847 }
|
|
3848
|
|
3849 // reenter() enters a lock and sets the recursion count
|
|
3850 // complete_exit/reenter operate as a wait without waiting
|
|
3851 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
|
|
3852 Thread * const Self = THREAD;
|
|
3853 assert(Self->is_Java_thread(), "Must be Java thread!");
|
|
3854 JavaThread *jt = (JavaThread *)THREAD;
|
|
3855
|
|
3856 guarantee(_owner != Self, "reenter already owner");
|
|
3857 enter (THREAD); // enter the monitor
|
|
3858 guarantee (_recursions == 0, "reenter recursion");
|
|
3859 _recursions = recursions;
|
|
3860 return;
|
|
3861 }
|
|
3862
|
|
3863 // Note: a subset of changes to ObjectMonitor::wait()
|
|
3864 // will need to be replicated in complete_exit above
|
|
3865 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
|
|
3866 Thread * const Self = THREAD ;
|
|
3867 assert(Self->is_Java_thread(), "Must be Java thread!");
|
|
3868 JavaThread *jt = (JavaThread *)THREAD;
|
|
3869
|
|
3870 DeferredInitialize () ;
|
|
3871
|
|
3872 // Throw IMSX or IEX.
|
|
3873 CHECK_OWNER();
|
|
3874
|
|
3875 // check for a pending interrupt
|
|
3876 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
|
|
3877 // post monitor waited event. Note that this is past-tense, we are done waiting.
|
|
3878 if (JvmtiExport::should_post_monitor_waited()) {
|
|
3879 // Note: 'false' parameter is passed here because the
|
|
3880 // wait was not timed out due to thread interrupt.
|
|
3881 JvmtiExport::post_monitor_waited(jt, this, false);
|
|
3882 }
|
|
3883 TEVENT (Wait - Throw IEX) ;
|
|
3884 THROW(vmSymbols::java_lang_InterruptedException());
|
|
3885 return ;
|
|
3886 }
|
|
3887 TEVENT (Wait) ;
|
|
3888
|
|
3889 assert (Self->_Stalled == 0, "invariant") ;
|
|
3890 Self->_Stalled = intptr_t(this) ;
|
|
3891 jt->set_current_waiting_monitor(this);
|
|
3892
|
|
3893 // create a node to be put into the queue
|
|
3894 // Critically, after we reset() the event but prior to park(), we must check
|
|
3895 // for a pending interrupt.
|
|
3896 ObjectWaiter node(Self);
|
|
3897 node.TState = ObjectWaiter::TS_WAIT ;
|
|
3898 Self->_ParkEvent->reset() ;
|
|
3899 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
|
|
3900
|
|
3901 // Enter the waiting queue, which is a circular doubly linked list in this case
|
|
3902 // but it could be a priority queue or any data structure.
|
|
3903 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
|
|
3904 // by the owner of the monitor *except* in the case where park()
|
|
3905 // returns because of a timeout or interrupt. Contention is exceptionally rare
|
|
3906 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
|
|
3907
|
|
3908 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
|
|
3909 AddWaiter (&node) ;
|
|
3910 Thread::SpinRelease (&_WaitSetLock) ;
|
|
3911
|
|
3912 if ((SyncFlags & 4) == 0) {
|
|
3913 _Responsible = NULL ;
|
|
3914 }
|
|
3915 intptr_t save = _recursions; // record the old recursion count
|
|
3916 _waiters++; // increment the number of waiters
|
|
3917 _recursions = 0; // set the recursion level to be 0
|
|
3918 exit (Self) ; // exit the monitor
|
|
3919 guarantee (_owner != Self, "invariant") ;
|
|
3920
|
|
3921 // As soon as the ObjectMonitor's ownership is dropped in the exit()
|
|
3922 // call above, another thread can enter() the ObjectMonitor, do the
|
|
3923 // notify(), and exit() the ObjectMonitor. If the other thread's
|
|
3924 // exit() call chooses this thread as the successor and the unpark()
|
|
3925 // call happens to occur while this thread is posting a
|
|
3926 // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
|
|
3927 // handler using RawMonitors and consuming the unpark().
|
|
3928 //
|
|
3929 // To avoid the problem, we re-post the event. This does no harm
|
|
3930 // even if the original unpark() was not consumed because we are the
|
|
3931 // chosen successor for this monitor.
|
|
3932 if (node._notified != 0 && _succ == Self) {
|
|
3933 node._event->unpark();
|
|
3934 }
|
|
3935
|
|
3936 // The thread is on the WaitSet list - now park() it.
|
|
3937 // On MP systems it's conceivable that a brief spin before we park
|
|
3938 // could be profitable.
|
|
3939 //
|
|
3940 // TODO-FIXME: change the following logic to a loop of the form
|
|
3941 // while (!timeout && !interrupted && _notified == 0) park()
|
|
3942
|
|
3943 int ret = OS_OK ;
|
|
3944 int WasNotified = 0 ;
|
|
3945 { // State transition wrappers
|
|
3946 OSThread* osthread = Self->osthread();
|
|
3947 OSThreadWaitState osts(osthread, true);
|
|
3948 {
|
|
3949 ThreadBlockInVM tbivm(jt);
|
|
3950 // Thread is in thread_blocked state and oop access is unsafe.
|
|
3951 jt->set_suspend_equivalent();
|
|
3952
|
|
3953 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
|
|
3954 // Intentionally empty
|
|
3955 } else
|
|
3956 if (node._notified == 0) {
|
|
3957 if (millis <= 0) {
|
|
3958 Self->_ParkEvent->park () ;
|
|
3959 } else {
|
|
3960 ret = Self->_ParkEvent->park (millis) ;
|
|
3961 }
|
|
3962 }
|
|
3963
|
|
3964 // were we externally suspended while we were waiting?
|
|
3965 if (ExitSuspendEquivalent (jt)) {
|
|
3966 // TODO-FIXME: add -- if succ == Self then succ = null.
|
|
3967 jt->java_suspend_self();
|
|
3968 }
|
|
3969
|
|
3970 } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
|
|
3971
|
|
3972
|
|
3973 // Node may be on the WaitSet, the EntryList (or cxq), or in transition
|
|
3974 // from the WaitSet to the EntryList.
|
|
3975 // See if we need to remove Node from the WaitSet.
|
|
3976 // We use double-checked locking to avoid grabbing _WaitSetLock
|
|
3977 // if the thread is not on the wait queue.
|
|
3978 //
|
|
3979 // Note that we don't need a fence before the fetch of TState.
|
|
3980 // In the worst case we'll fetch an old, stale value of TS_WAIT previously
|
|
3981 // written by this thread. (Perhaps the fetch might even be satisfied
|
|
3982 // by a look-aside into the processor's own store buffer, although given
|
|
3983 // the length of the code path between the prior ST and this load that's
|
|
3984 // highly unlikely). If the following LD fetches a stale TS_WAIT value
|
|
3985 // then we'll acquire the lock and then re-fetch a fresh TState value.
|
|
3986 // That is, we fail toward safety.
|
|
3987
|
|
3988 if (node.TState == ObjectWaiter::TS_WAIT) {
|
|
3989 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
|
|
3990 if (node.TState == ObjectWaiter::TS_WAIT) {
|
|
3991 DequeueSpecificWaiter (&node) ; // unlink from WaitSet
|
|
3992 assert(node._notified == 0, "invariant");
|
|
3993 node.TState = ObjectWaiter::TS_RUN ;
|
|
3994 }
|
|
3995 Thread::SpinRelease (&_WaitSetLock) ;
|
|
3996 }
|
|
3997
|
|
3998 // The thread is now either off-list (TS_RUN),
|
|
3999 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
|
|
4000 // The Node's TState variable is stable from the perspective of this thread.
|
|
4001 // No other threads will asynchronously modify TState.
|
|
4002 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
|
|
4003 OrderAccess::loadload() ;
|
|
4004 if (_succ == Self) _succ = NULL ;
|
|
4005 WasNotified = node._notified ;
|
|
4006
|
|
4007 // Reentry phase -- reacquire the monitor.
|
|
4008 // re-enter contended monitor after object.wait().
|
|
4009 // retain OBJECT_WAIT state until re-enter successfully completes
|
|
4010 // Thread state is thread_in_vm and oop access is again safe,
|
|
4011 // although the raw address of the object may have changed.
|
|
4012 // (Don't cache naked oops over safepoints, of course).
|
|
4013
|
|
4014 // post monitor waited event. Note that this is past-tense, we are done waiting.
|
|
4015 if (JvmtiExport::should_post_monitor_waited()) {
|
|
4016 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
|
|
4017 }
|
|
4018 OrderAccess::fence() ;
|
|
4019
|
|
4020 assert (Self->_Stalled != 0, "invariant") ;
|
|
4021 Self->_Stalled = 0 ;
|
|
4022
|
|
4023 assert (_owner != Self, "invariant") ;
|
|
4024 ObjectWaiter::TStates v = node.TState ;
|
|
4025 if (v == ObjectWaiter::TS_RUN) {
|
|
4026 enter (Self) ;
|
|
4027 } else {
|
|
4028 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
|
|
4029 ReenterI (Self, &node) ;
|
|
4030 node.wait_reenter_end(this);
|
|
4031 }
|
|
4032
|
|
4033 // Self has reacquired the lock.
|
|
4034 // Lifecycle - the node representing Self must not appear on any queues.
|
|
4035 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
|
|
4036 // want residual elements associated with this thread left on any lists.
|
|
4037 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
|
|
4038 assert (_owner == Self, "invariant") ;
|
|
4039 assert (_succ != Self , "invariant") ;
|
|
4040 } // OSThreadWaitState()
|
|
4041
|
|
4042 jt->set_current_waiting_monitor(NULL);
|
|
4043
|
|
4044 guarantee (_recursions == 0, "invariant") ;
|
|
4045 _recursions = save; // restore the old recursion count
|
|
4046 _waiters--; // decrement the number of waiters
|
|
4047
|
|
4048 // Verify a few postconditions
|
|
4049 assert (_owner == Self , "invariant") ;
|
|
4050 assert (_succ != Self , "invariant") ;
|
|
4051 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
|
|
4052
|
|
4053 if (SyncFlags & 32) {
|
|
4054 OrderAccess::fence() ;
|
|
4055 }
|
|
4056
|
|
4057 // check if the notification happened
|
|
4058 if (!WasNotified) {
|
|
4059 // no, it could be timeout or Thread.interrupt() or both
|
|
4060 // check for interrupt event, otherwise it is timeout
|
|
4061 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
|
|
4062 TEVENT (Wait - throw IEX from epilog) ;
|
|
4063 THROW(vmSymbols::java_lang_InterruptedException());
|
|
4064 }
|
|
4065 }
|
|
4066
|
|
4067 // NOTE: A spurious wakeup will be treated as a timeout.
|
|
4068 // Monitor notify has precedence over thread interrupt.
|
|
4069 }
|
|
4070
|
|
4071
|
|
4072 // Consider:
|
|
4073 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
|
|
4074 // then instead of transferring a thread from the WaitSet to the EntryList
|
|
4075 // we might just dequeue a thread from the WaitSet and directly unpark() it.
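//
// A hedged sketch of that idea (hypothetical -- not what notify() below does,
// although it resembles the Policy >= 4 "unpark immediately" path):
//
//   if (_cxq == NULL && _succ == NULL) {
//      ObjectWaiter * w = DequeueWaiter () ;        // caller holds _WaitSetLock
//      if (w != NULL) {
//         w->TState = ObjectWaiter::TS_RUN ;
//         OrderAccess::fence() ;
//         w->_event->unpark() ;
//      }
//   }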
|
|
4076
|
|
4077 void ObjectMonitor::notify(TRAPS) {
|
|
4078 CHECK_OWNER();
|
|
4079 if (_WaitSet == NULL) {
|
|
4080 TEVENT (Empty-Notify) ;
|
|
4081 return ;
|
|
4082 }
|
|
4083 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
|
|
4084
|
|
4085 int Policy = Knob_MoveNotifyee ;
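// Knob_MoveNotifyee policies, as implemented by the branches below:
//   Policy == 0 : prepend the notifyee to the EntryList
//   Policy == 1 : append the notifyee to the EntryList
//   Policy == 2 : prepend the notifyee to the cxq (or to the EntryList if it is empty)
//   Policy == 3 : append the notifyee to the cxq
//   Policy >= 4 : mark the notifyee TS_RUN and unpark() it immediately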
|
|
4086
|
|
4087 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
|
|
4088 ObjectWaiter * iterator = DequeueWaiter() ;
|
|
4089 if (iterator != NULL) {
|
|
4090 TEVENT (Notify1 - Transfer) ;
|
|
4091 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
|
|
4092 guarantee (iterator->_notified == 0, "invariant") ;
|
|
4093 // Disposition - what might we do with iterator ?
|
|
4094 // a. add it directly to the EntryList - either tail or head.
|
|
4095 // b. push it onto the front of the _cxq.
|
|
4096 // For now we use (a).
|
|
4097 if (Policy != 4) {
|
|
4098 iterator->TState = ObjectWaiter::TS_ENTER ;
|
|
4099 }
|
|
4100 iterator->_notified = 1 ;
|
|
4101
|
|
4102 ObjectWaiter * List = _EntryList ;
|
|
4103 if (List != NULL) {
|
|
4104 assert (List->_prev == NULL, "invariant") ;
|
|
4105 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
|
4106 assert (List != iterator, "invariant") ;
|
|
4107 }
|
|
4108
|
|
4109 if (Policy == 0) { // prepend to EntryList
|
|
4110 if (List == NULL) {
|
|
4111 iterator->_next = iterator->_prev = NULL ;
|
|
4112 _EntryList = iterator ;
|
|
4113 } else {
|
|
4114 List->_prev = iterator ;
|
|
4115 iterator->_next = List ;
|
|
4116 iterator->_prev = NULL ;
|
|
4117 _EntryList = iterator ;
|
|
4118 }
|
|
4119 } else
|
|
4120 if (Policy == 1) { // append to EntryList
|
|
4121 if (List == NULL) {
|
|
4122 iterator->_next = iterator->_prev = NULL ;
|
|
4123 _EntryList = iterator ;
|
|
4124 } else {
|
|
4125 // CONSIDER: finding the tail currently requires a linear-time walk of
|
|
4126 // the EntryList. We can make tail access constant-time by converting to
|
|
4127 // a CDLL instead of using our current DLL.
|
|
4128 ObjectWaiter * Tail ;
|
|
4129 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
|
|
4130 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
|
|
4131 Tail->_next = iterator ;
|
|
4132 iterator->_prev = Tail ;
|
|
4133 iterator->_next = NULL ;
|
|
4134 }
|
|
4135 } else
|
|
4136 if (Policy == 2) { // prepend to cxq
|
|
4137 // Note: if the EntryList is empty we prepend to the EntryList instead.
|
|
4138 if (List == NULL) {
|
|
4139 iterator->_next = iterator->_prev = NULL ;
|
|
4140 _EntryList = iterator ;
|
|
4141 } else {
|
|
4142 iterator->TState = ObjectWaiter::TS_CXQ ;
|
|
4143 for (;;) {
|
|
4144 ObjectWaiter * Front = _cxq ;
|
|
4145 iterator->_next = Front ;
|
|
4146 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
|
|
4147 break ;
|
|
4148 }
|
|
4149 }
|
|
4150 }
|
|
4151 } else
|
|
4152 if (Policy == 3) { // append to cxq
|
|
4153 iterator->TState = ObjectWaiter::TS_CXQ ;
|
|
4154 for (;;) {
|
|
4155 ObjectWaiter * Tail ;
|
|
4156 Tail = _cxq ;
|
|
4157 if (Tail == NULL) {
|
|
4158 iterator->_next = NULL ;
|
|
4159 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
|
|
4160 break ;
|
|
4161 }
|
|
4162 } else {
|
|
4163 while (Tail->_next != NULL) Tail = Tail->_next ;
|
|
4164 Tail->_next = iterator ;
|
|
4165 iterator->_prev = Tail ;
|
|
4166 iterator->_next = NULL ;
|
|
4167 break ;
|
|
4168 }
|
|
4169 }
|
|
4170 } else {
|
|
4171 ParkEvent * ev = iterator->_event ;
|
|
4172 iterator->TState = ObjectWaiter::TS_RUN ;
|
|
4173 OrderAccess::fence() ;
|
|
4174 ev->unpark() ;
|
|
4175 }
|
|
4176
|
|
4177 if (Policy < 4) {
|
|
4178 iterator->wait_reenter_begin(this);
|
|
4179 }
|
|
4180
|
|
4181 // _WaitSetLock protects the wait queue, not the EntryList. We could
|
|
4182 // move the add-to-EntryList operation, above, outside the critical section
|
|
4183 // protected by _WaitSetLock. In practice that's not useful. With the
|
|
4184 // exception of wait() timeouts and interrupts the monitor owner
|
|
4185 // is the only thread that grabs _WaitSetLock. There's almost no contention
|
|
4186 // on _WaitSetLock so it's not profitable to reduce the length of the
|
|
4187 // critical section.
|
|
4188 }
|
|
4189
|
|
4190 Thread::SpinRelease (&_WaitSetLock) ;
|
|
4191
|
|
4192 if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
|
|
4193 ObjectSynchronizer::_sync_Notifications->inc() ;
|
|
4194 }
|
|
4195 }
|
|
4196
|
|
4197
|
|
4198 void ObjectMonitor::notifyAll(TRAPS) {
|
|
4199 CHECK_OWNER();
|
|
4200 ObjectWaiter* iterator;
|
|
4201 if (_WaitSet == NULL) {
|
|
4202 TEVENT (Empty-NotifyAll) ;
|
|
4203 return ;
|
|
4204 }
|
|
4205 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
|
|
4206
|
|
4207 int Policy = Knob_MoveNotifyee ;
|
|
4208 int Tally = 0 ;
|
|
4209 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
|
|
4210
|
|
4211 for (;;) {
|
|
4212 iterator = DequeueWaiter () ;
|
|
4213 if (iterator == NULL) break ;
|
|
4214 TEVENT (NotifyAll - Transfer1) ;
|
|
4215 ++Tally ;
|
|
4216
|
|
4217 // Disposition - what might we do with iterator ?
|
|
4218 // a. add it directly to the EntryList - either tail or head.
|
|
4219 // b. push it onto the front of the _cxq.
|
|
4220 // For now we use (a).
|
|
4221 //
|
|
4222 // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
|
|
4223 // to the EntryList. This could be done more efficiently with a single bulk transfer,
|
|
4224 // but in practice it's not time-critical. Beware, too, that in prepend mode we invert the
|
|
4225 // order of the waiters. Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
|
|
4226 // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
|
|
4227 // be "DCBAXYZ".
|
|
4228
|
|
4229 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
|
|
4230 guarantee (iterator->_notified == 0, "invariant") ;
|
|
4231 iterator->_notified = 1 ;
|
|
4232 if (Policy != 4) {
|
|
4233 iterator->TState = ObjectWaiter::TS_ENTER ;
|
|
4234 }
|
|
4235
|
|
4236 ObjectWaiter * List = _EntryList ;
|
|
4237 if (List != NULL) {
|
|
4238 assert (List->_prev == NULL, "invariant") ;
|
|
4239 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
|
4240 assert (List != iterator, "invariant") ;
|
|
4241 }
|
|
4242
|
|
4243 if (Policy == 0) { // prepend to EntryList
|
|
4244 if (List == NULL) {
|
|
4245 iterator->_next = iterator->_prev = NULL ;
|
|
4246 _EntryList = iterator ;
|
|
4247 } else {
|
|
4248 List->_prev = iterator ;
|
|
4249 iterator->_next = List ;
|
|
4250 iterator->_prev = NULL ;
|
|
4251 _EntryList = iterator ;
|
|
4252 }
|
|
4253 } else
|
|
4254 if (Policy == 1) { // append to EntryList
|
|
4255 if (List == NULL) {
|
|
4256 iterator->_next = iterator->_prev = NULL ;
|
|
4257 _EntryList = iterator ;
|
|
4258 } else {
|
|
4259 // CONSIDER: finding the tail currently requires a linear-time walk of
|
|
4260 // the EntryList. We can make tail access constant-time by converting to
|
|
4261 // a CDLL instead of using our current DLL.
|
|
4262 ObjectWaiter * Tail ;
|
|
4263 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
|
|
4264 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
|
|
4265 Tail->_next = iterator ;
|
|
4266 iterator->_prev = Tail ;
|
|
4267 iterator->_next = NULL ;
|
|
4268 }
|
|
4269 } else
|
|
4270 if (Policy == 2) { // prepend to cxq
|
|
4272 iterator->TState = ObjectWaiter::TS_CXQ ;
|
|
4273 for (;;) {
|
|
4274 ObjectWaiter * Front = _cxq ;
|
|
4275 iterator->_next = Front ;
|
|
4276 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
|
|
4277 break ;
|
|
4278 }
|
|
4279 }
|
|
4280 } else
|
|
4281 if (Policy == 3) { // append to cxq
|
|
4282 iterator->TState = ObjectWaiter::TS_CXQ ;
|
|
4283 for (;;) {
|
|
4284 ObjectWaiter * Tail ;
|
|
4285 Tail = _cxq ;
|
|
4286 if (Tail == NULL) {
|
|
4287 iterator->_next = NULL ;
|
|
4288 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
|
|
4289 break ;
|
|
4290 }
|
|
4291 } else {
|
|
4292 while (Tail->_next != NULL) Tail = Tail->_next ;
|
|
4293 Tail->_next = iterator ;
|
|
4294 iterator->_prev = Tail ;
|
|
4295 iterator->_next = NULL ;
|
|
4296 break ;
|
|
4297 }
|
|
4298 }
|
|
4299 } else {
|
|
4300 ParkEvent * ev = iterator->_event ;
|
|
4301 iterator->TState = ObjectWaiter::TS_RUN ;
|
|
4302 OrderAccess::fence() ;
|
|
4303 ev->unpark() ;
|
|
4304 }
|
|
4305
|
|
4306 if (Policy < 4) {
|
|
4307 iterator->wait_reenter_begin(this);
|
|
4308 }
|
|
4309
|
|
4310 // _WaitSetLock protects the wait queue, not the EntryList. We could
|
|
4311 // move the add-to-EntryList operation, above, outside the critical section
|
|
4312 // protected by _WaitSetLock. In practice that's not useful. With the
|
|
4313 // exception of wait() timeouts and interrupts the monitor owner
|
|
4314 // is the only thread that grabs _WaitSetLock. There's almost no contention
|
|
4315 // on _WaitSetLock so it's not profitable to reduce the length of the
|
|
4316 // critical section.
|
|
4317 }
|
|
4318
|
|
4319 Thread::SpinRelease (&_WaitSetLock) ;
|
|
4320
|
|
4321 if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
|
|
4322 ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
|
|
4323 }
|
|
4324 }
|
|
4325
|
|
4326 // check_slow() is a misnomer. It's called simply to throw an IMSX exception.
|
|
4327 // TODO-FIXME: remove check_slow() -- it's likely dead.
|
|
4328
|
|
4329 void ObjectMonitor::check_slow(TRAPS) {
|
|
4330 TEVENT (check_slow - throw IMSX) ;
|
|
4331 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
|
|
4332 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
|
|
4333 }
|
|
4334
|
|
4335
|
|
4336 // -------------------------------------------------------------------------
|
|
4337 // The raw monitor subsystem is entirely distinct from normal
|
|
4338 // java-synchronization or jni-synchronization. raw monitors are not
|
|
4339 // associated with objects. They can be implemented in any manner
|
|
4340 // that makes sense. The original implementors decided to piggy-back
|
|
4341 // the raw-monitor implementation on the existing Java objectMonitor mechanism.
|
|
4342 // This flaw needs to be fixed. We should reimplement raw monitors as a sui generis facility.
|
|
4343 // Specifically, we should not implement raw monitors via java monitors.
|
|
4344 // Time permitting, we should disentangle and deconvolve the two implementations
|
|
4345 // and move the resulting raw monitor implementation over to the JVMTI directories.
|
|
4346 // Ideally, the raw monitor implementation would be built on top of
|
|
4347 // park-unpark and nothing else.
|
|
4348 //
|
|
4349 // raw monitors are used mainly by JVMTI
|
|
4350 // The raw monitor implementation borrows the ObjectMonitor structure,
|
|
4351 // but the operators are degenerate and extremely simple.
|
|
4352 //
|
|
4353 // Mixed use of a single objectMonitor instance -- as both a raw monitor
|
|
4354 // and a normal java monitor -- is not permissible.
|
|
4355 //
|
|
4356 // Note that we use the single RawMonitor_lock to protect queue operations for
|
|
4357 // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
|
|
4358 // is deprecated and rare, this is not of concern. The RawMonitor_lock cannot
|
|
4359 // be held indefinitely. The critical sections must be short and bounded.
|
|
4360 //
|
|
4361 // -------------------------------------------------------------------------
|
|
4362
|
|
4363 int ObjectMonitor::SimpleEnter (Thread * Self) {
|
|
4364 for (;;) {
|
|
4365 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
|
|
4366 return OS_OK ;
|
|
4367 }
|
|
4368
|
|
4369 ObjectWaiter Node (Self) ;
|
|
4370 Self->_ParkEvent->reset() ; // strictly optional
|
|
4371 Node.TState = ObjectWaiter::TS_ENTER ;
|
|
4372
|
|
4373 RawMonitor_lock->lock_without_safepoint_check() ;
|
|
4374 Node._next = _EntryList ;
|
|
4375 _EntryList = &Node ;
|
|
4376 OrderAccess::fence() ;
|
|
4377 if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
|
|
4378 _EntryList = Node._next ;
|
|
4379 RawMonitor_lock->unlock() ;
|
|
4380 return OS_OK ;
|
|
4381 }
|
|
4382 RawMonitor_lock->unlock() ;
|
|
4383 while (Node.TState == ObjectWaiter::TS_ENTER) {
|
|
4384 Self->_ParkEvent->park() ;
|
|
4385 }
|
|
4386 }
|
|
4387 }
|
|
4388
|
|
4389 int ObjectMonitor::SimpleExit (Thread * Self) {
|
|
4390 guarantee (_owner == Self, "invariant") ;
|
|
4391 OrderAccess::release_store_ptr (&_owner, NULL) ;
|
|
4392 OrderAccess::fence() ;
|
|
4393 if (_EntryList == NULL) return OS_OK ;
|
|
4394 ObjectWaiter * w ;
|
|
4395
|
|
4396 RawMonitor_lock->lock_without_safepoint_check() ;
|
|
4397 w = _EntryList ;
|
|
4398 if (w != NULL) {
|
|
4399 _EntryList = w->_next ;
|
|
4400 }
|
|
4401 RawMonitor_lock->unlock() ;
|
|
4402 if (w != NULL) {
|
|
4403 guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
|
|
4404 ParkEvent * ev = w->_event ;
|
|
4405 w->TState = ObjectWaiter::TS_RUN ;
|
|
4406 OrderAccess::fence() ;
|
|
4407 ev->unpark() ;
|
|
4408 }
|
|
4409 return OS_OK ;
|
|
4410 }
|
|
4411
|
|
4412 int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
|
|
4413 guarantee (_owner == Self , "invariant") ;
|
|
4414 guarantee (_recursions == 0, "invariant") ;
|
|
4415
|
|
4416 ObjectWaiter Node (Self) ;
|
|
4417 Node._notified = 0 ;
|
|
4418 Node.TState = ObjectWaiter::TS_WAIT ;
|
|
4419
|
|
4420 RawMonitor_lock->lock_without_safepoint_check() ;
|
|
4421 Node._next = _WaitSet ;
|
|
4422 _WaitSet = &Node ;
|
|
4423 RawMonitor_lock->unlock() ;
|
|
4424
|
|
4425 SimpleExit (Self) ;
|
|
4426 guarantee (_owner != Self, "invariant") ;
|
|
4427
|
|
4428 int ret = OS_OK ;
|
|
4429 if (millis <= 0) {
|
|
4430 Self->_ParkEvent->park();
|
|
4431 } else {
|
|
4432 ret = Self->_ParkEvent->park(millis);
|
|
4433 }
|
|
4434
|
|
4435 // If thread still resides on the waitset then unlink it.
|
|
4436 // Double-checked locking -- the usage is safe in this context
|
|
4437 // as TState is volatile and the lock-unlock operators are
|
|
4438 // serializing (barrier-equivalent).
|
|
4439
|
|
4440 if (Node.TState == ObjectWaiter::TS_WAIT) {
|
|
4441 RawMonitor_lock->lock_without_safepoint_check() ;
|
|
4442 if (Node.TState == ObjectWaiter::TS_WAIT) {
|
|
4443 // Simple O(n) unlink, but performance isn't critical here.
|
|
4444 ObjectWaiter * p ;
|
|
4445 ObjectWaiter * q = NULL ;
|
|
4446 for (p = _WaitSet ; p != &Node; p = p->_next) {
|
|
4447 q = p ;
|
|
4448 }
|
|
4449 guarantee (p == &Node, "invariant") ;
|
|
4450 if (q == NULL) {
|
|
4451 guarantee (p == _WaitSet, "invariant") ;
|
|
4452 _WaitSet = p->_next ;
|
|
4453 } else {
|
|
4454 guarantee (p == q->_next, "invariant") ;
|
|
4455 q->_next = p->_next ;
|
|
4456 }
|
|
4457 Node.TState = ObjectWaiter::TS_RUN ;
|
|
4458 }
|
|
4459 RawMonitor_lock->unlock() ;
|
|
4460 }
|
|
4461
|
|
4462 guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
|
|
4463 SimpleEnter (Self) ;
|
|
4464
|
|
4465 guarantee (_owner == Self, "invariant") ;
|
|
4466 guarantee (_recursions == 0, "invariant") ;
|
|
4467 return ret ;
|
|
4468 }
|
|
4469
|
|
4470 int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
|
|
4471 guarantee (_owner == Self, "invariant") ;
|
|
4472 if (_WaitSet == NULL) return OS_OK ;
|
|
4473
|
|
4474 // We have two options:
|
|
4475 // A. Transfer the threads from the WaitSet to the EntryList
|
|
4476 // B. Remove the thread from the WaitSet and unpark() it.
|
|
4477 //
|
|
4478 // We use (B), which is crude and results in lots of futile
|
|
4479 // context switching. In particular (B) induces lots of contention.
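//
// For contrast, option (A) would look roughly like this (hypothetical sketch
// using the existing fields; the transferred threads would then be woken
// later, one at a time, by SimpleExit()):
//
//   RawMonitor_lock->lock_without_safepoint_check() ;
//   while (_WaitSet != NULL) {
//      ObjectWaiter * w = _WaitSet ;
//      _WaitSet = w->_next ;
//      w->TState = ObjectWaiter::TS_ENTER ;
//      w->_next = _EntryList ;       // push onto the EntryList
//      _EntryList = w ;
//      if (!All) break ;
//   }
//   RawMonitor_lock->unlock() ;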
|
|
4480
|
|
4481 ParkEvent * ev = NULL ; // consider using a small auto array ...
|
|
4482 RawMonitor_lock->lock_without_safepoint_check() ;
|
|
4483 for (;;) {
|
|
4484 ObjectWaiter * w = _WaitSet ;
|
|
4485 if (w == NULL) break ;
|
|
4486 _WaitSet = w->_next ;
|
|
4487 if (ev != NULL) { ev->unpark(); ev = NULL; }
|
|
4488 ev = w->_event ;
|
|
4489 OrderAccess::loadstore() ;
|
|
4490 w->TState = ObjectWaiter::TS_RUN ;
|
|
4491 OrderAccess::storeload();
|
|
4492 if (!All) break ;
|
|
4493 }
|
|
4494 RawMonitor_lock->unlock() ;
|
|
4495 if (ev != NULL) ev->unpark();
|
|
4496 return OS_OK ;
|
|
4497 }
|
|
4498
|
|
4499 // Any JavaThread will enter here with state _thread_blocked
|
|
4500 int ObjectMonitor::raw_enter(TRAPS) {
|
|
4501 TEVENT (raw_enter) ;
|
|
4502 void * Contended ;
|
|
4503
|
|
4504 // don't enter a raw monitor if the thread is being externally suspended; it will
|
|
4505 // surprise the suspender if a "suspended" thread can still enter a monitor
|
|
4506 JavaThread * jt = (JavaThread *)THREAD;
|
|
4507 if (THREAD->is_Java_thread()) {
|
|
4508 jt->SR_lock()->lock_without_safepoint_check();
|
|
4509 while (jt->is_external_suspend()) {
|
|
4510 jt->SR_lock()->unlock();
|
|
4511 jt->java_suspend_self();
|
|
4512 jt->SR_lock()->lock_without_safepoint_check();
|
|
4513 }
|
|
4514 // guarded by SR_lock to avoid racing with new external suspend requests.
|
|
4515 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
|
|
4516 jt->SR_lock()->unlock();
|
|
4517 } else {
|
|
4518 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
|
|
4519 }
|
|
4520
|
|
4521 if (Contended == THREAD) {
|
|
4522 _recursions ++ ;
|
|
4523 return OM_OK ;
|
|
4524 }
|
|
4525
|
|
4526 if (Contended == NULL) {
|
|
4527 guarantee (_owner == THREAD, "invariant") ;
|
|
4528 guarantee (_recursions == 0, "invariant") ;
|
|
4529 return OM_OK ;
|
|
4530 }
|
|
4531
|
|
4532 THREAD->set_current_pending_monitor(this);
|
|
4533
|
|
4534 if (!THREAD->is_Java_thread()) {
|
|
4535 // No non-Java thread other than the VM thread would acquire
|
|
4536 // a raw monitor.
|
|
4537 assert(THREAD->is_VM_thread(), "must be VM thread");
|
|
4538 SimpleEnter (THREAD) ;
|
|
4539 } else {
|
|
4540 guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
|
|
4541 for (;;) {
|
|
4542 jt->set_suspend_equivalent();
|
|
4543 // cleared by handle_special_suspend_equivalent_condition() or
|
|
4544 // java_suspend_self()
|
|
4545 SimpleEnter (THREAD) ;
|
|
4546
|
|
4547 // were we externally suspended while we were waiting?
|
|
4548 if (!jt->handle_special_suspend_equivalent_condition()) break ;
|
|
4549
|
|
4550 // This thread was externally suspended
|
|
4551 //
|
|
4552 // This logic isn't needed for JVMTI raw monitors,
|
|
4553 // but doesn't hurt just in case the suspend rules change. This
|
|
4554 // logic is needed for the ObjectMonitor.wait() reentry phase.
|
|
4555 // We have reentered the contended monitor, but while we were
|
|
4556 // waiting another thread suspended us. We don't want to reenter
|
|
4557 // the monitor while suspended because that would surprise the
|
|
4558 // thread that suspended us.
|
|
4559 //
|
|
4560 // Drop the lock -
|
|
4561 SimpleExit (THREAD) ;
|
|
4562
|
|
4563 jt->java_suspend_self();
|
|
4564 }
|
|
4565
|
|
4566 assert(_owner == THREAD, "Fatal error with monitor owner!");
|
|
4567 assert(_recursions == 0, "Fatal error with monitor recursions!");
|
|
4568 }
|
|
4569
|
|
4570 THREAD->set_current_pending_monitor(NULL);
|
|
4571 guarantee (_recursions == 0, "invariant") ;
|
|
4572 return OM_OK;
|
|
4573 }
|
|
4574
|
|
4575 // Used mainly for JVMTI raw monitor implementation
|
|
4576 // Also used for ObjectMonitor::wait().
|
|
4577 int ObjectMonitor::raw_exit(TRAPS) {
|
|
4578 TEVENT (raw_exit) ;
|
|
4579 if (THREAD != _owner) {
|
|
4580 return OM_ILLEGAL_MONITOR_STATE;
|
|
4581 }
|
|
4582 if (_recursions > 0) {
|
|
4583 --_recursions ;
|
|
4584 return OM_OK ;
|
|
4585 }
|
|
4586
|
|
4587 void * List = _EntryList ; // TODO-FIXME: dead store -- candidate for removal
|
|
4588 SimpleExit (THREAD) ;
|
|
4589
|
|
4590 return OM_OK;
|
|
4591 }
|
|
4592
|
|
4593 // Used for JVMTI raw monitor implementation.
|
|
4594 // All JavaThreads will enter here with state _thread_blocked
|
|
4595
|
|
4596 int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
|
|
4597 TEVENT (raw_wait) ;
|
|
4598 if (THREAD != _owner) {
|
|
4599 return OM_ILLEGAL_MONITOR_STATE;
|
|
4600 }
|
|
4601
|
|
4602 // To avoid spurious wakeups we reset the parkevent -- this is strictly optional.
|
|
4603 // The caller must be able to tolerate spurious returns from raw_wait().
|
|
4604 THREAD->_ParkEvent->reset() ;
|
|
4605 OrderAccess::fence() ;
|
|
4606
|
|
4607 // check interrupt event
|
|
4608 if (interruptible && Thread::is_interrupted(THREAD, true)) {
|
|
4609 return OM_INTERRUPTED;
|
|
4610 }
|
|
4611
|
|
4612 intptr_t save = _recursions ;
|
|
4613 _recursions = 0 ;
|
|
4614 _waiters ++ ;
|
|
4615 if (THREAD->is_Java_thread()) {
|
|
4616 guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
|
|
4617 ((JavaThread *)THREAD)->set_suspend_equivalent();
|
|
4618 }
|
|
4619 int rv = SimpleWait (THREAD, millis) ;
|
|
4620 _recursions = save ;
|
|
4621 _waiters -- ;
|
|
4622
|
|
4623 guarantee (THREAD == _owner, "invariant") ;
|
|
4624 if (THREAD->is_Java_thread()) {
|
|
4625 JavaThread * jSelf = (JavaThread *) THREAD ;
|
|
4626 for (;;) {
|
|
4627 if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
|
|
4628 SimpleExit (THREAD) ;
|
|
4629 jSelf->java_suspend_self();
|
|
4630 SimpleEnter (THREAD) ;
|
|
4631 jSelf->set_suspend_equivalent() ;
|
|
4632 }
|
|
4633 }
|
|
4634 guarantee (THREAD == _owner, "invariant") ;
|
|
4635
|
|
4636 if (interruptible && Thread::is_interrupted(THREAD, true)) {
|
|
4637 return OM_INTERRUPTED;
|
|
4638 }
|
|
4639 return OM_OK ;
|
|
4640 }
|
|
4641
|
|
4642 int ObjectMonitor::raw_notify(TRAPS) {
|
|
4643 TEVENT (raw_notify) ;
|
|
4644 if (THREAD != _owner) {
|
|
4645 return OM_ILLEGAL_MONITOR_STATE;
|
|
4646 }
|
|
4647 SimpleNotify (THREAD, false) ;
|
|
4648 return OM_OK;
|
|
4649 }
|
|
4650
|
|
4651 int ObjectMonitor::raw_notifyAll(TRAPS) {
|
|
4652 TEVENT (raw_notifyAll) ;
|
|
4653 if (THREAD != _owner) {
|
|
4654 return OM_ILLEGAL_MONITOR_STATE;
|
|
4655 }
|
|
4656 SimpleNotify (THREAD, true) ;
|
|
4657 return OM_OK;
|
|
4658 }
|
|
4659
|
|
4660 #ifndef PRODUCT
|
|
4661 void ObjectMonitor::verify() {
|
|
4662 }
|
|
4663
|
|
4664 void ObjectMonitor::print() {
|
|
4665 }
|
|
4666 #endif
|
|
4667
|
|
4668 //------------------------------------------------------------------------------
|
|
4669 // Non-product code
|
|
4670
|
|
4671 #ifndef PRODUCT
|
|
4672
|
|
4673 void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
|
|
4674 bool is_method, bool is_locking) {
|
|
4675 // Don't know what to do here
|
|
4676 }
|
|
4677
|
|
4678 // Verify all monitors in the monitor cache; the verification is weak.
|
|
4679 void ObjectSynchronizer::verify() {
|
|
4680 ObjectMonitor* block = gBlockList;
|
|
4681 ObjectMonitor* mid;
|
|
4682 while (block) {
|
|
4683 assert(block->object() == CHAINMARKER, "must be a block header");
|
|
4684 for (int i = 1; i < _BLOCKSIZE; i++) {
|
|
4685 mid = block + i;
|
|
4686 oop object = (oop) mid->object();
|
|
4687 if (object != NULL) {
|
|
4688 mid->verify();
|
|
4689 }
|
|
4690 }
|
|
4691 block = (ObjectMonitor*) block->FreeNext;
|
|
4692 }
|
|
4693 }
|
|
4694
|
|
4695 // Check if monitor belongs to the monitor cache
|
|
4696 // The list is grow-only so it's *relatively* safe to traverse
|
|
4697 // the list of extant blocks without taking a lock.
|
|
4698
|
|
4699 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
|
|
4700 ObjectMonitor* block = gBlockList;
|
|
4701
|
|
4702 while (block) {
|
|
4703 assert(block->object() == CHAINMARKER, "must be a block header");
|
|
4704 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
|
|
4705 address mon = (address) monitor;
|
|
4706 address blk = (address) block;
|
|
4707 size_t diff = mon - blk;
|
|
4708 assert((diff % sizeof(ObjectMonitor)) == 0, "check");
|
|
4709 return 1;
|
|
4710 }
|
|
4711 block = (ObjectMonitor*) block->FreeNext;
|
|
4712 }
|
|
4713 return 0;
|
|
4714 }
|
|
4715
|
|
4716 #endif
|