annotate: src/share/vm/runtime/synchronizer.cpp
changeset:   14714:b602356a9cfc
summary:     additional canonicalizers for accesses and value nodes (improves number of implicit null checks)
author:      Lukas Stadler <lukas.stadler@oracle.com>
date:        Thu, 20 Mar 2014 17:15:36 +0100
parents:     d8041d695d19
children:    4ca6dc0799b6
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(PPC64)
  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
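// Illustrative sketch (not part of the original source): a minimal, hypothetical view of the
// caller-side protocol described above.  'lock1' and 'lock2' are assumed Handles to the outer
// and inner lock objects, THREAD is the current JavaThread, and exception handling is omitted.
#if 0
  // 1) fully release lock1, remembering its recursion count
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2) wait on lock2 (lock2 must already be held; wait() releases and re-acquires it)
  ObjectSynchronizer::wait(lock2, 0, THREAD);
  // 3) once notified, the caller exits lock2, then
  // 4) restores lock1 with its original recursion count
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
  // 5) re-acquires lock2 as needed
#endif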
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
     monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
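// Illustrative sketch (not part of the original source): ObjectLocker is an RAII helper, so a
// typical in-VM use is scoped as below.  'h' is an assumed Handle to the Java object and THREAD
// the current thread; the lock is taken in the constructor and released in the destructor.
#if 0
  {
    ObjectLocker ol(h, THREAD, true);    // fast_enter() in the constructor
    // ... operate on the object while it is locked ...
  }                                      // fast_exit() in the destructor
#endif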


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable. This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems. As such, I avoid using OrderAccess::storestore()
// until it's repaired. In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs. Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy.   A more complete solution would require that the inflating
         // thread hold the associated inflation lock.  The following code simply restricts
         // the number of spinners to at most one.  We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer.  Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
         // and calling park().  When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same RNG.
     // On MP system we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = cast_from_oop<intptr_t>(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
//
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds new usage of
      // the displaced header, please update this code
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//

static void InduceScavenge (Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be enqueued and posted
    // to the VMThread and has a lifespan longer than that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute (new VM_ForceAsyncSafepoint()) ;

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
  }
}
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors.   Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           if (MonitorInUseLists) {
             m->FreeNext = Self->omInUseList;
             Self->omInUseList = m;
             Self->omInUseCount ++;
             // verifyInUse(Self);
           } else {
             m->FreeNext = NULL;
           }
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                MonitorFreeCount --;
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take, false) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;

            const int mx = MonitorBound ;
            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
              // We can't safely induce a STW safepoint from omAlloc() as our thread
              // state may not be appropriate for such activities and callers may hold
              // naked oops, so instead we defer the action.
              InduceScavenge (Self, "omAlloc") ;
            }
            continue;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                                   "Allocate ObjectMonitors");
        }

        // Format the block.
        // Initialize the linked list; each monitor points to its next,
        // forming the singly linked free list.  The very first monitor
        // points to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered.  A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand.  This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
        MonitorPopulation += _BLOCKSIZE-1;
        MonitorFreeCount += _BLOCKSIZE-1;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        gBlockList = temp;

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//

void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
    guarantee (m->object() == NULL, "invariant") ;

    // Remove from omInUseList
    if (MonitorInUseLists && fromPerThreadAlloc) {
      ObjectMonitor* curmidinuse = NULL;
      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
        if (m == mid) {
          // extract from per-thread in-use-list
          if (mid == Self->omInUseList) {
            Self->omInUseList = mid->FreeNext;
          } else if (curmidinuse != NULL) {
            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
          }
          Self->omInUseCount --;
          // verifyInUse(Self);
          break;
        } else {
          curmidinuse = mid;
          mid = mid->FreeNext;
        }
      }
    }

    // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
}
1078 | |
1079 // Return the monitors of a moribund thread's local free list to | |
1080 // the global free list. Typically a thread calls omFlush() when | |
1081 // it's dying. We could also consider having the VM thread steal | |
1082 // monitors from threads that have not run java code over a few | |
1083 // consecutive STW safepoints. Relatedly, we might decay | |
1084 // omFreeProvision at STW safepoints. | |
1085 // | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1086 // Also return the monitors of a moribund thread's omInUseList to |
1087 // a global gOmInUseList under the global list lock so these |
1088 // will continue to be scanned. |
1089 // |
0 | 1090 // We currently call omFlush() from the Thread:: dtor _after the thread |
1091 // has been excised from the thread list and is no longer a mutator. | |
1092 // That means that omFlush() can run concurrently with a safepoint and | |
1093 // the scavenge operator. Calling omFlush() from JavaThread::exit() might | |
1094 // be a better choice as we could safely reason that the JVM is |
1095 // not at a safepoint at the time of the call, and thus there could | |
1096 // be no inopportune interleavings between omFlush() and the scavenge |
1097 // operator. | |
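// In outline, omFlush() detaches both of the thread's lists and, holding
// ListLock, splices them onto the corresponding global lists: the omFreeList
// segment is prepended to gFreeList (updating MonitorFreeCount) and the
// omInUseList segment is prepended to gOmInUseList (updating gOmInUseCount),
// so monitors that are still in use remain visible to deflate_idle_monitors().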
1098 | |
1099 void ObjectSynchronizer::omFlush (Thread * Self) { | |
1100 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL | |
1101 Self->omFreeList = NULL ; | |
1102 ObjectMonitor * Tail = NULL ; | |
1587 | 1103 int Tally = 0; |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1104 if (List != NULL) { |
1105 ObjectMonitor * s ; |
1106 for (s = List ; s != NULL ; s = s->FreeNext) { |
1107 Tally ++ ; |
1108 Tail = s ; |
1109 guarantee (s->object() == NULL, "invariant") ; |
1110 guarantee (!s->is_busy(), "invariant") ; |
1111 s->set_owner (NULL) ; // redundant but good hygiene |
1112 TEVENT (omFlush - Move one) ; |
1113 } |
1114 guarantee (Tail != NULL && List != NULL, "invariant") ; |
0 | 1115 } |
1116 | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1117 ObjectMonitor * InUseList = Self->omInUseList; |
1118 ObjectMonitor * InUseTail = NULL ; |
1119 int InUseTally = 0; |
1120 if (InUseList != NULL) { |
1121 Self->omInUseList = NULL; |
1122 ObjectMonitor *curom; |
1123 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) { |
1124 InUseTail = curom; |
1125 InUseTally++; |
1126 } |
1127 // TODO debug |
1128 assert(Self->omInUseCount == InUseTally, "inuse count off"); |
1129 Self->omInUseCount = 0; |
1130 guarantee (InUseTail != NULL && InUseList != NULL, "invariant"); |
1131 } |
1132 |
0 | 1133 Thread::muxAcquire (&ListLock, "omFlush") ; |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1134 if (Tail != NULL) { |
1135 Tail->FreeNext = gFreeList ; |
1136 gFreeList = List ; |
1137 MonitorFreeCount += Tally; |
1138 } |
1139 |
1140 if (InUseTail != NULL) { |
1141 InUseTail->FreeNext = gOmInUseList; |
1142 gOmInUseList = InUseList; |
1143 gOmInUseCount += InUseTally; |
1144 } |
1145 |
0 | 1146 Thread::muxRelease (&ListLock) ; |
1147 TEVENT (omFlush) ; | |
1148 } | |
1149 | |
1150 // Fast path code shared by multiple functions | |
1151 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { | |
1152 markOop mark = obj->mark(); | |
1153 if (mark->has_monitor()) { | |
1154 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); | |
1155 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); | |
1156 return mark->monitor(); | |
1157 } | |
1158 return ObjectSynchronizer::inflate(Thread::current(), obj); | |
1159 } | |
1160 | |
1878 | 1161 |
0 | 1162 // Note that we could encounter some performance loss through false-sharing as |
1163 // multiple locks occupy the same $ line. Padding might be appropriate. | |
1164 | |
1165 | |
1166 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { | |
1167 // Inflate mutates the heap ... | |
1168 // Relaxing assertion for bug 6320749. | |
1169 assert (Universe::verify_in_progress() || | |
1170 !SafepointSynchronize::is_at_safepoint(), "invariant") ; | |
1171 | |
1172 for (;;) { | |
1173 const markOop mark = object->mark() ; | |
1174 assert (!mark->has_bias_pattern(), "invariant") ; | |
1175 | |
1176 // The mark can be in one of the following states: | |
1177 // * Inflated - just return | |
1178 // * Stack-locked - coerce it to inflated | |
1179 // * INFLATING - busy wait for conversion to complete | |
1180 // * Neutral - aggressively inflate the object. | |
1181 // * BIASED - Illegal. We should never see this | |
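// (Roughly, these states correspond to the low-order mark bits defined in
// markOop.hpp: neutral/unlocked 01, stack-locked 00 with the mark holding a
// pointer to the on-stack BasicLock, inflated 10 with the mark holding a
// pointer to the ObjectMonitor, and INFLATING the transient all-zero value
// installed below; biased patterns are ruled out by the assertion above.)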
1182 | |
1183 // CASE: inflated | |
1184 if (mark->has_monitor()) { | |
1185 ObjectMonitor * inf = mark->monitor() ; | |
1186 assert (inf->header()->is_neutral(), "invariant"); | |
1187 assert (inf->object() == object, "invariant") ; | |
1188 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); | |
1189 return inf ; | |
1190 } | |
1191 | |
1192 // CASE: inflation in progress - inflating over a stack-lock. | |
1193 // Some other thread is converting from stack-locked to inflated. | |
1194 // Only that thread can complete inflation -- other threads must wait. | |
1195 // The INFLATING value is transient. | |
1196 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. | |
1197 // We could always eliminate polling by parking the thread on some auxiliary list. | |
1198 if (mark == markOopDesc::INFLATING()) { | |
1199 TEVENT (Inflate: spin while INFLATING) ; | |
1200 ReadStableMark(object) ; | |
1201 continue ; | |
1202 } | |
1203 | |
1204 // CASE: stack-locked | |
1205 // Could be stack-locked either by this thread or by some other thread. | |
1206 // | |
1207 // Note that we allocate the objectmonitor speculatively, _before_ attempting | |
1208 // to install INFLATING into the mark word. We originally installed INFLATING, | |
1209 // allocated the objectmonitor, and then finally STed the address of the | |
1210 // objectmonitor into the mark. This was correct, but artificially lengthened | |
1211 // the interval in which INFLATING appeared in the mark, thus increasing |
1212 // the odds of inflation contention. | |
1213 // | |
1214 // We now use per-thread private objectmonitor free lists. | |
1215 // These lists are reprovisioned from the global free list outside the |
1216 // critical INFLATING...ST interval. A thread can transfer | |
1217 // multiple objectmonitors en masse from the global free list to its local free list. |
1218 // This reduces coherency traffic and lock contention on the global free list. | |
1219 // Using such local free lists, it doesn't matter if the omAlloc() call appears | |
1220 // before or after the CAS(INFLATING) operation. | |
1221 // See the comments in omAlloc(). | |
1222 | |
1223 if (mark->has_locker()) { | |
1224 ObjectMonitor * m = omAlloc (Self) ; | |
1225 // Optimistically prepare the objectmonitor - anticipate successful CAS | |
1226 // We do this before the CAS in order to minimize the length of time | |
1227 // in which INFLATING appears in the mark. | |
1228 m->Recycle(); | |
1229 m->_Responsible = NULL ; | |
1230 m->OwnerIsThread = 0 ; | |
1231 m->_recursions = 0 ; | |
1878 | 1232 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class |
0 | 1233 |
1234 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; | |
1235 if (cmp != mark) { | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1236 omRelease (Self, m, true) ; |
0 | 1237 continue ; // Interference -- just retry |
1238 } | |
1239 | |
1240 // We've successfully installed INFLATING (0) into the mark-word. | |
1241 // This is the only case where 0 will appear in a mark-word. |
1242 // Only the singular thread that successfully swings the mark-word | |
1243 // to 0 can perform (or more precisely, complete) inflation. | |
1244 // | |
1245 // Why do we CAS a 0 into the mark-word instead of just CASing the | |
1246 // mark-word from the stack-locked value directly to the new inflated state? | |
1247 // Consider what happens when a thread unlocks a stack-locked object. | |
1248 // It attempts to use CAS to swing the displaced header value from the | |
1249 // on-stack basiclock back into the object header. Recall also that the | |
1250 // header value (hashcode, etc) can reside in (a) the object header, or | |
1251 // (b) a displaced header associated with the stack-lock, or (c) a displaced | |
1252 // header in an objectMonitor. The inflate() routine must copy the header | |
1253 // value from the basiclock on the owner's stack to the objectMonitor, all | |
1254 // the while preserving the hashCode stability invariants. If the owner | |
1255 // decides to release the lock while the value is 0, the unlock will fail | |
1256 // and control will eventually pass from slow_exit() to inflate. The owner | |
1257 // will then spin, waiting for the 0 value to disappear. Put another way, | |
1258 // the 0 causes the owner to stall if the owner happens to try to | |
1259 // drop the lock (restoring the header from the basiclock to the object) | |
1260 // while inflation is in-progress. This protocol avoids races that might | |
1261 // otherwise permit hashCode values to change or "flicker" for an object. |
1262 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. | |
1263 // 0 serves as a "BUSY" inflate-in-progress indicator. | |
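// In outline, the stack-lock inflation path below is:
// 1. CAS the mark from the stack-lock value to INFLATING (0); retry on failure.
// 2. Copy the displaced header from the owner's BasicLock into the monitor.
// 3. Set the monitor's owner and object fields.
// 4. Release-store the encoded monitor pointer into the mark, which
// publishes the monitor and ends the INFLATING window.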
1264 | |
1265 | |
1266 // fetch the displaced mark from the owner's stack. | |
1267 // The owner can't die or unwind past the lock while our INFLATING | |
1268 // object is in the mark. Furthermore the owner can't complete | |
1269 // an unlock on the object, either. | |
1270 markOop dmw = mark->displaced_mark_helper() ; | |
1271 assert (dmw->is_neutral(), "invariant") ; | |
1272 | |
1273 // Setup monitor fields to proper values -- prepare the monitor | |
1274 m->set_header(dmw) ; | |
1275 | |
1276 // Optimization: if the mark->locker stack address is associated | |
1277 // with this thread we could simply set m->_owner = Self and | |
702
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1278 // m->OwnerIsThread = 1. Note that a thread can inflate an object |
0 | 1279 // that it has stack-locked -- as might happen in wait() -- directly |
1280 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. | |
702
b9fba36710f2
6699669: Hotspot server leaves synchronized block with monitor in bad state
xlu
parents:
579
diff
changeset
|
1281 m->set_owner(mark->locker()); |
0 | 1282 m->set_object(object); |
1283 // TODO-FIXME: assert BasicLock->dhw != 0. | |
1284 | |
1285 // Must preserve store ordering. The monitor state must | |
1286 // be stable at the time of publishing the monitor address. | |
1287 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ; | |
1288 object->release_set_mark(markOopDesc::encode(m)); | |
1289 | |
1290 // Hopefully the performance counters are allocated on distinct cache lines | |
1291 // to avoid false sharing on MP systems ... | |
1878 | 1292 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ; |
0 | 1293 TEVENT(Inflate: overwrite stacklock) ; |
1294 if (TraceMonitorInflation) { | |
1295 if (object->is_instance()) { | |
1296 ResourceMark rm; | |
1297 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10405
diff
changeset
|
1298 (void *) object, (intptr_t) object->mark(), |
6983 | 1299 object->klass()->external_name()); |
0 | 1300 } |
1301 } | |
1302 return m ; | |
1303 } | |
1304 | |
1305 // CASE: neutral | |
1306 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. | |
1307 // If we know we're inflating for entry it's better to inflate by swinging a | |
1308 // pre-locked objectMonitor pointer into the object header. A successful | |
1309 // CAS inflates the object *and* confers ownership to the inflating thread. | |
1310 // In the current implementation we use a 2-step mechanism where we CAS() | |
1311 // to inflate and then CAS() again to try to swing _owner from NULL to Self. | |
1312 // An inflateTry() method that we could call from fast_enter() and slow_enter() | |
1313 // would be useful. | |
1314 | |
1315 assert (mark->is_neutral(), "invariant"); | |
1316 ObjectMonitor * m = omAlloc (Self) ; | |
1317 // prepare m for installation - set monitor to initial state | |
1318 m->Recycle(); | |
1319 m->set_header(mark); | |
1320 m->set_owner(NULL); | |
1321 m->set_object(object); | |
1322 m->OwnerIsThread = 1 ; | |
1323 m->_recursions = 0 ; | |
1324 m->_Responsible = NULL ; | |
1878 | 1325 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class |
0 | 1326 |
1327 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { | |
1328 m->set_object (NULL) ; | |
1329 m->set_owner (NULL) ; | |
1330 m->OwnerIsThread = 0 ; | |
1331 m->Recycle() ; | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1332 omRelease (Self, m, true) ; |
0 | 1333 m = NULL ; |
1334 continue ; | |
1335 // interference - the markword changed - just retry. | |
1336 // The state-transitions are one-way, so there's no chance of | |
1337 // live-lock -- "Inflated" is an absorbing state. | |
1338 } | |
1339 | |
1340 // Hopefully the performance counters are allocated on distinct | |
1341 // cache lines to avoid false sharing on MP systems ... | |
1878 | 1342 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ; |
0 | 1343 TEVENT(Inflate: overwrite neutral) ; |
1344 if (TraceMonitorInflation) { | |
1345 if (object->is_instance()) { | |
1346 ResourceMark rm; | |
1347 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10405
diff
changeset
|
1348 (void *) object, (intptr_t) object->mark(), |
6983 | 1349 object->klass()->external_name()); |
0 | 1350 } |
1351 } | |
1352 return m ; | |
1353 } | |
1354 } | |
1355 | |
1878 | 1356 // Note that we could encounter some performance loss through false-sharing as |
1357 // multiple locks occupy the same $ line. Padding might be appropriate. | |
0 | 1358 |
1359 | |
1360 // Deflate_idle_monitors() is called at all safepoints, immediately | |
1361 // after all mutators are stopped, but before any objects have moved. | |
1362 // It traverses the list of known monitors, deflating where possible. | |
1363 // The scavenged monitors are returned to the monitor free list. |
1364 // | |
1365 // Beware that we scavenge at *every* stop-the-world point. | |
1366 // Having a large number of monitors in-circulation negatively | |
1367 // impacts the performance of some applications (e.g., PointBase). | |
1368 // Broadly, we want to minimize the # of monitors in circulation. | |
1587 | 1369 // |
1370 // We have added a flag, MonitorInUseLists, which creates a list | |
1371 // of active monitors for each thread. deflate_idle_monitors() | |
1372 // only scans the per-thread inuse lists. omAlloc() puts all | |
1373 // assigned monitors on the per-thread list. deflate_idle_monitors() | |
1374 // returns the non-busy monitors to the global free list. | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1375 // When a thread dies, omFlush() adds the list of active monitors for |
1376 // that thread to a global gOmInUseList acquiring the |
1377 // global list lock. deflate_idle_monitors() acquires the global |
1378 // list lock to scan for non-busy monitors, which it returns to the global free list. |
1587 | 1379 // An alternative could have used a single global inuse list. The |
1380 // downside would have been the additional cost of acquiring the global list lock | |
1381 // for every omAlloc(). | |
0 | 1382 // |
1383 // Perversely, the heap size -- and thus the STW safepoint rate -- | |
1384 // typically drives the scavenge rate. Large heaps can mean infrequent GC, | |
1385 // which in turn can mean large(r) numbers of objectmonitors in circulation. | |
1386 // This is an unfortunate aspect of this design. | |
1387 // | |
1388 | |
1878 | 1389 enum ManifestConstants { |
1390 ClearResponsibleAtSTW = 0, | |
1391 MaximumRecheckInterval = 1000 | |
1392 } ; | |
1587 | 1393 |
1394 // Deflate a single monitor if not in use | |
1395 // Return true if deflated, false if in use | |
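// A monitor qualifies for deflation only while it is still associated with
// its object and mid->is_busy() reports no owner and no contending or
// waiting threads; in that case the object's header is restored from the
// monitor and the monitor is appended to the caller's FreeHead/FreeTail
// working list for later return to gFreeList.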
1396 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, | |
1397 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) { | |
1398 bool deflated; | |
1399 // Normal case ... The monitor is associated with obj. | |
1400 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ; | |
1401 guarantee (mid == obj->mark()->monitor(), "invariant"); | |
1402 guarantee (mid->header()->is_neutral(), "invariant"); | |
1403 | |
1404 if (mid->is_busy()) { | |
1405 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ; | |
1406 deflated = false; | |
1407 } else { | |
1408 // Deflate the monitor if it is no longer being used | |
1409 // It's idle - scavenge and return to the global free list | |
1410 // plain old deflation ... | |
1411 TEVENT (deflate_idle_monitors - scavenge1) ; | |
1412 if (TraceMonitorInflation) { | |
1413 if (obj->is_instance()) { | |
1414 ResourceMark rm; | |
1415 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", | |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10405
diff
changeset
|
1416 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name()); |
1587 | 1417 } |
1418 } | |
1419 | |
1420 // Restore the header back to obj | |
1421 obj->release_set_mark(mid->header()); | |
1422 mid->clear(); | |
1423 | |
1424 assert (mid->object() == NULL, "invariant") ; | |
1425 | |
1426 // Move the object to the working free list defined by FreeHead,FreeTail. | |
1427 if (*FreeHeadp == NULL) *FreeHeadp = mid; | |
1428 if (*FreeTailp != NULL) { | |
1429 ObjectMonitor * prevtail = *FreeTailp; | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1430 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK |
1587 | 1431 prevtail->FreeNext = mid; |
1432 } | |
1433 *FreeTailp = mid; | |
1434 deflated = true; | |
1435 } | |
1436 return deflated; | |
1437 } | |
1438 | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1439 // Caller acquires ListLock |
1440 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp, |
1441 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) { |
1442 ObjectMonitor* mid; |
1443 ObjectMonitor* next; |
1444 ObjectMonitor* curmidinuse = NULL; |
1445 int deflatedcount = 0; |
1446 |
1447 for (mid = *listheadp; mid != NULL; ) { |
1448 oop obj = (oop) mid->object(); |
1449 bool deflated = false; |
1450 if (obj != NULL) { |
1451 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp); |
1452 } |
1453 if (deflated) { |
1454 // extract from per-thread in-use-list |
1455 if (mid == *listheadp) { |
1456 *listheadp = mid->FreeNext; |
1457 } else if (curmidinuse != NULL) { |
1458 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist |
1459 } |
1460 next = mid->FreeNext; |
1461 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list |
1462 mid = next; |
1463 deflatedcount++; |
1464 } else { |
1465 curmidinuse = mid; |
1466 mid = mid->FreeNext; |
1467 } |
1468 } |
1469 return deflatedcount; |
1470 } |
1471 |
0 | 1472 void ObjectSynchronizer::deflate_idle_monitors() { |
1473 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); | |
1474 int nInuse = 0 ; // currently associated with objects | |
1475 int nInCirculation = 0 ; // extant | |
1476 int nScavenged = 0 ; // reclaimed | |
1587 | 1477 bool deflated = false; |
0 | 1478 |
1479 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors | |
1480 ObjectMonitor * FreeTail = NULL ; | |
1481 | |
1587 | 1482 TEVENT (deflate_idle_monitors) ; |
1483 // Prevent omFlush from changing mids in Thread dtor's during deflation | |
1484 // And in case the vm thread is acquiring a lock during a safepoint | |
1485 // See e.g. 6320749 | |
1486 Thread::muxAcquire (&ListLock, "scavenge - return") ; | |
1487 | |
1488 if (MonitorInUseLists) { | |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1489 int inUse = 0; |
1587 | 1490 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1491 nInCirculation+= cur->omInUseCount; |
1492 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail); |
1493 cur->omInUseCount-= deflatedcount; |
1494 // verifyInUse(cur); |
1495 nScavenged += deflatedcount; |
1496 nInuse += cur->omInUseCount; |
1587 | 1497 } |
1640
bfc89697cccb
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
1589
diff
changeset
|
1498 |
1499 // For moribund threads, scan gOmInUseList |
1500 if (gOmInUseList) { |
1501 nInCirculation += gOmInUseCount; |
1502 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail); |
1503 gOmInUseCount-= deflatedcount; |
1504 nScavenged += deflatedcount; |
1505 nInuse += gOmInUseCount; |
1506 } |
1507 |
1587 | 1508 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { |
0 | 1509 // Iterate over all extant monitors - Scavenge all idle monitors. |
1510 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1511 nInCirculation += _BLOCKSIZE ; | |
1512 for (int i = 1 ; i < _BLOCKSIZE; i++) { | |
1513 ObjectMonitor* mid = &block[i]; | |
1514 oop obj = (oop) mid->object(); | |
1515 | |
1516 if (obj == NULL) { | |
1517 // The monitor is not associated with an object. | |
1518 // The monitor should either be a thread-specific private | |
1519 // free list or the global free list. | |
1520 // obj == NULL IMPLIES mid->is_busy() == 0 | |
1521 guarantee (!mid->is_busy(), "invariant") ; | |
1522 continue ; | |
1523 } | |
1587 | 1524 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail); |
1525 | |
1526 if (deflated) { | |
1527 mid->FreeNext = NULL ; | |
1528 nScavenged ++ ; | |
0 | 1529 } else { |
1587 | 1530 nInuse ++; |
0 | 1531 } |
1532 } | |
1533 } | |
1534 | |
1587 | 1535 MonitorFreeCount += nScavenged; |
1536 | |
1537 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree. | |
1538 | |
1878 | 1539 if (ObjectMonitor::Knob_Verbose) { |
1587 | 1540 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n", |
1541 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge, | |
1542 MonitorPopulation, MonitorFreeCount) ; | |
1543 ::fflush(stdout) ; | |
1544 } | |
1545 | |
1546 ForceMonitorScavenge = 0; // Reset | |
1547 | |
0 | 1548 // Move the scavenged monitors back to the global free list. |
1549 if (FreeHead != NULL) { | |
1550 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ; | |
1551 assert (FreeTail->FreeNext == NULL, "invariant") ; | |
1552 // constant-time list splice - prepend scavenged segment to gFreeList | |
1553 FreeTail->FreeNext = gFreeList ; | |
1554 gFreeList = FreeHead ; | |
1555 } | |
1587 | 1556 Thread::muxRelease (&ListLock) ; |
0 | 1557 |
1878 | 1558 if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ; |
1559 if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation); | |
0 | 1560 |
1561 // TODO: Add objectMonitor leak detection. | |
1562 // Audit/inventory the objectMonitors -- make sure they're all accounted for. | |
1563 GVars.stwRandom = os::random() ; | |
1564 GVars.stwCycle ++ ; | |
1565 } | |
1566 | |
1878 | 1567 // Monitor cleanup on JavaThread::exit |
0 | 1568 |
1878 | 1569 // Iterate through monitor cache and attempt to release thread's monitors |
1570 // Gives up on a particular monitor if an exception occurs, but continues | |
1571 // the overall iteration, swallowing the exception. | |
1572 class ReleaseJavaMonitorsClosure: public MonitorClosure { | |
1573 private: | |
1574 TRAPS; | |
0 | 1575 |
1878 | 1576 public: |
1577 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} | |
1578 void do_monitor(ObjectMonitor* mid) { | |
1579 if (mid->owner() == THREAD) { | |
1580 (void)mid->complete_exit(CHECK); | |
0 | 1581 } |
1582 } | |
1878 | 1583 }; |
0 | 1584 |
1878 | 1585 // Release all inflated monitors owned by THREAD. Lightweight monitors are |
1586 // ignored. This is meant to be called during JNI thread detach which assumes | |
1587 // all remaining monitors are heavyweight. All exceptions are swallowed. | |
1588 // Scanning the extant monitor list can be time consuming. | |
1589 // A simple optimization is to add a per-thread flag that indicates a thread | |
1590 // called jni_monitorenter() during its lifetime. | |
0 | 1591 // |
1878 | 1592 // Instead of No_Safepoint_Verifier it might be cheaper to |
1593 // use an idiom of the form: | |
1594 // auto int tmp = SafepointSynchronize::_safepoint_counter ; | |
1595 // <code that must not run at safepoint> | |
1596 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; | |
1597 // Since the tests are extremely cheap we could leave them enabled | |
1598 // for normal product builds. | |
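// (This relies on the assumption that _safepoint_counter is incremented at
// each safepoint transition and is odd while a safepoint is in progress,
// so an unchanged, even sample implies no safepoint ran in between; see
// safepoint.cpp for the counter's actual behavior.)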
0 | 1599 |
1878 | 1600 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { |
1601 assert(THREAD == JavaThread::current(), "must be current Java thread"); | |
1602 No_Safepoint_Verifier nsv ; | |
1603 ReleaseJavaMonitorsClosure rjmc(THREAD); | |
1604 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); | |
1605 ObjectSynchronizer::monitors_iterate(&rjmc); | |
1606 Thread::muxRelease(&ListLock); | |
1607 THREAD->clear_pending_exception(); | |
0 | 1608 } |
1609 | |
1610 //------------------------------------------------------------------------------ | |
1611 // Non-product code | |
1612 | |
1613 #ifndef PRODUCT | |
1614 | |
1615 // Verify all monitors in the monitor cache; the verification is weak. |
1616 void ObjectSynchronizer::verify() { | |
1617 ObjectMonitor* block = gBlockList; | |
1618 ObjectMonitor* mid; | |
1619 while (block) { | |
1620 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1621 for (int i = 1; i < _BLOCKSIZE; i++) { | |
1622 mid = block + i; | |
1623 oop object = (oop) mid->object(); | |
1624 if (object != NULL) { | |
1625 mid->verify(); | |
1626 } | |
1627 } | |
1628 block = (ObjectMonitor*) block->FreeNext; | |
1629 } | |
1630 } | |
1631 | |
1632 // Check if monitor belongs to the monitor cache | |
1633 // The list is grow-only so it's *relatively* safe to traverse | |
1634 // the list of extant blocks without taking a lock. | |
1635 | |
1636 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { | |
1637 ObjectMonitor* block = gBlockList; | |
1638 | |
1639 while (block) { | |
1640 assert(block->object() == CHAINMARKER, "must be a block header"); | |
1641 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { | |
1642 address mon = (address) monitor; | |
1643 address blk = (address) block; | |
1644 size_t diff = mon - blk; | |
1645 assert((diff % sizeof(ObjectMonitor)) == 0, "check"); | |
1646 return 1; | |
1647 } | |
1648 block = (ObjectMonitor*) block->FreeNext; | |
1649 } | |
1650 return 0; | |
1651 } | |
1652 | |
1653 #endif |