Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/gcLocker.hpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author | ysr |
---|---|
date | Tue, 07 Dec 2010 21:55:53 -0800 |
parents | f95d63e2154a |
children | f08d439fab8c |
rev | line source |
---|---|
0 | 1 /* |
1972 | 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_MEMORY_GCLOCKER_HPP |
26 #define SHARE_VM_MEMORY_GCLOCKER_HPP | |
27 | |
28 #include "gc_interface/collectedHeap.hpp" | |
29 #include "memory/genCollectedHeap.hpp" | |
30 #include "memory/universe.hpp" | |
31 #include "oops/oop.hpp" | |
32 #ifdef TARGET_OS_FAMILY_linux | |
33 # include "os_linux.inline.hpp" | |
34 # include "thread_linux.inline.hpp" | |
35 #endif | |
36 #ifdef TARGET_OS_FAMILY_solaris | |
37 # include "os_solaris.inline.hpp" | |
38 # include "thread_solaris.inline.hpp" | |
39 #endif | |
40 #ifdef TARGET_OS_FAMILY_windows | |
41 # include "os_windows.inline.hpp" | |
42 # include "thread_windows.inline.hpp" | |
43 #endif | |
44 | |
0 | 45 // The direct lock/unlock calls do not force a collection if an unlock |
46 // decrements the count to zero. Avoid calling these if at all possible. | |
47 | |
48 class GC_locker: public AllStatic { | |
49 private: | |
50 static volatile jint _jni_lock_count; // number of jni active instances | |
51 static volatile jint _lock_count; // number of other active instances | |
52 static volatile bool _needs_gc; // heap is filling, we need a GC | |
53 // note: bool is typedef'd as jint | |
54 static volatile bool _doing_gc; // unlock_critical() is doing a GC | |
55 | |
56 // Accessors | |
57 static bool is_jni_active() { | |
58 return _jni_lock_count > 0; | |
59 } | |
60 | |
61 static void set_needs_gc() { | |
62 assert(SafepointSynchronize::is_at_safepoint(), | |
63 "needs_gc is only set at a safepoint"); | |
64 _needs_gc = true; | |
65 } | |
66 | |
67 static void clear_needs_gc() { | |
68 assert_lock_strong(JNICritical_lock); | |
69 _needs_gc = false; | |
70 } | |
71 | |
72 static void jni_lock() { | |
73 Atomic::inc(&_jni_lock_count); | |
74 CHECK_UNHANDLED_OOPS_ONLY( | |
75 if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; }) | |
76 assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(), | |
77 "locking failed"); | |
78 } | |
79 | |
80 static void jni_unlock() { | |
81 Atomic::dec(&_jni_lock_count); | |
82 CHECK_UNHANDLED_OOPS_ONLY( | |
83 if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; }) | |
84 } | |
85 | |
86 static void jni_lock_slow(); | |
87 static void jni_unlock_slow(); | |
88 | |
89 public: | |
90 // Accessors | |
91 static bool is_active(); | |
92 static bool needs_gc() { return _needs_gc; } | |
93 // Shorthand | |
94 static bool is_active_and_needs_gc() { return is_active() && needs_gc();} | |
95 | |
96 // Calls set_needs_gc() if is_active() is true. Returns is_active(). | |
97 static bool check_active_before_gc(); | |
98 | |
99 // Stalls the caller (who should not be in a jni critical section) | |
100 // until needs_gc() clears. Note however that needs_gc() may be | |
101 // set at a subsequent safepoint and/or cleared under the | |
102 // JNICritical_lock, so the caller may not safely assert upon | |
103 // return from this method that "!needs_gc()" since that is | |
104 // not a stable predicate. | |
105 static void stall_until_clear(); | |
106 | |
107 // Non-structured GC locking: currently needed for JNI. Use with care! | |
108 static void lock(); | |
109 static void unlock(); | |
110 | |
111 // The following two methods are used for JNI critical regions. | |
112 // If we find that we failed to perform a GC because the GC_locker | |
113 // was active, arrange for one as soon as possible by allowing | |
114 // all threads in critical regions to complete, but not allowing | |
115 // other critical regions to be entered. The reasons for that are: | |
116 // 1) a GC request won't be starved by overlapping JNI critical | |
117 // region activities, which can cause unnecessary OutOfMemory errors. | |
118 // 2) even if allocation requests can still be satisfied before GC locker | |
119 // becomes inactive, for example, in tenured generation possibly with | |
120 // heap expansion, those allocations can trigger lots of safepointing | |
121 // attempts (ineffective GC attempts) and require Heap_lock which | |
122 // slow down allocations tremendously. | |
123 // | |
124 // Note that critical regions can be nested in a single thread, so | |
125 // we must allow threads already in critical regions to continue. | |
126 // | |
127 // JNI critical regions are the only participants in this scheme | |
128 // because they are, by spec, well bounded while in a critical region. | |
129 // | |
130 // Each of the following two method is split into a fast path and a slow | |
131 // path. JNICritical_lock is only grabbed in the slow path. | |
132 // _needs_gc is initially false and every java thread will go | |
133 // through the fast path (which does the same thing as the slow path | |
134 // when _needs_gc is false). When GC happens at a safepoint, | |
135 // GC_locker::is_active() is checked. Since there is no safepoint in the | |
136 // fast path of lock_critical() and unlock_critical(), there is no race | |
137 // condition between the fast path and GC. After _needs_gc is set at a | |
138 // safepoint, every thread will go through the slow path after the safepoint. | |
139 // Since after a safepoint, each of the following two methods is either | |
140 // entered from the method entry and falls into the slow path, or is | |
141 // resumed from the safepoints in the method, which only exist in the slow | |
142 // path. So when _needs_gc is set, the slow path is always taken, till | |
143 // _needs_gc is cleared. | |
144 static void lock_critical(JavaThread* thread); | |
145 static void unlock_critical(JavaThread* thread); | |
146 }; | |
147 | |
148 | |
149 // A No_GC_Verifier object can be placed in methods where one assumes that | |
150 // no garbage collection will occur. The destructor will verify this property | |
151 // unless the constructor is called with argument false (not verifygc). | |
152 // | |
153 // The check will only be done in debug mode and if verifygc true. | |
154 | |
155 class No_GC_Verifier: public StackObj { | |
156 friend class Pause_No_GC_Verifier; | |
157 | |
158 protected: | |
159 bool _verifygc; | |
160 unsigned int _old_invocations; | |
161 | |
162 public: | |
163 #ifdef ASSERT | |
164 No_GC_Verifier(bool verifygc = true); | |
165 ~No_GC_Verifier(); | |
166 #else | |
167 No_GC_Verifier(bool verifygc = true) {} | |
168 ~No_GC_Verifier() {} | |
169 #endif | |
170 }; | |
171 | |
172 // A Pause_No_GC_Verifier is used to temporarily pause the behavior | |
173 // of a No_GC_Verifier object. If we are not in debug mode or if the | |
174 // No_GC_Verifier object has a _verifygc value of false, then there | |
175 // is nothing to do. | |
176 | |
177 class Pause_No_GC_Verifier: public StackObj { | |
178 private: | |
179 No_GC_Verifier * _ngcv; | |
180 | |
181 public: | |
182 #ifdef ASSERT | |
183 Pause_No_GC_Verifier(No_GC_Verifier * ngcv); | |
184 ~Pause_No_GC_Verifier(); | |
185 #else | |
186 Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {} | |
187 ~Pause_No_GC_Verifier() {} | |
188 #endif | |
189 }; | |
190 | |
191 | |
192 // A No_Safepoint_Verifier object will throw an assertion failure if | |
193 // the current thread passes a possible safepoint while this object is | |
194 // instantiated. A safepoint, will either be: an oop allocation, blocking | |
195 // on a Mutex or JavaLock, or executing a VM operation. | |
196 // | |
197 // If StrictSafepointChecks is turned off, it degrades into a No_GC_Verifier | |
198 // | |
199 class No_Safepoint_Verifier : public No_GC_Verifier { | |
200 friend class Pause_No_Safepoint_Verifier; | |
201 | |
202 private: | |
203 bool _activated; | |
204 Thread *_thread; | |
205 public: | |
206 #ifdef ASSERT | |
98
deb97b8ef02b
6679708: No_Safepoint_Verifier and BacktraceBuilder have uninitialized fields
never
parents:
0
diff
changeset
|
207 No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : |
deb97b8ef02b
6679708: No_Safepoint_Verifier and BacktraceBuilder have uninitialized fields
never
parents:
0
diff
changeset
|
208 No_GC_Verifier(verifygc), |
deb97b8ef02b
6679708: No_Safepoint_Verifier and BacktraceBuilder have uninitialized fields
never
parents:
0
diff
changeset
|
209 _activated(activated) { |
0 | 210 _thread = Thread::current(); |
211 if (_activated) { | |
212 _thread->_allow_allocation_count++; | |
213 _thread->_allow_safepoint_count++; | |
214 } | |
215 } | |
216 | |
217 ~No_Safepoint_Verifier() { | |
218 if (_activated) { | |
219 _thread->_allow_allocation_count--; | |
220 _thread->_allow_safepoint_count--; | |
221 } | |
222 } | |
223 #else | |
224 No_Safepoint_Verifier(bool activated = true, bool verifygc = true) : No_GC_Verifier(verifygc){} | |
225 ~No_Safepoint_Verifier() {} | |
226 #endif | |
227 }; | |
228 | |
229 // A Pause_No_Safepoint_Verifier is used to temporarily pause the | |
230 // behavior of a No_Safepoint_Verifier object. If we are not in debug | |
231 // mode then there is nothing to do. If the No_Safepoint_Verifier | |
232 // object has an _activated value of false, then there is nothing to | |
233 // do for safepoint and allocation checking, but there may still be | |
234 // something to do for the underlying No_GC_Verifier object. | |
235 | |
236 class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier { | |
237 private: | |
238 No_Safepoint_Verifier * _nsv; | |
239 | |
240 public: | |
241 #ifdef ASSERT | |
242 Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv) | |
243 : Pause_No_GC_Verifier(nsv) { | |
244 | |
245 _nsv = nsv; | |
246 if (_nsv->_activated) { | |
247 _nsv->_thread->_allow_allocation_count--; | |
248 _nsv->_thread->_allow_safepoint_count--; | |
249 } | |
250 } | |
251 | |
252 ~Pause_No_Safepoint_Verifier() { | |
253 if (_nsv->_activated) { | |
254 _nsv->_thread->_allow_allocation_count++; | |
255 _nsv->_thread->_allow_safepoint_count++; | |
256 } | |
257 } | |
258 #else | |
259 Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv) | |
260 : Pause_No_GC_Verifier(nsv) {} | |
261 ~Pause_No_Safepoint_Verifier() {} | |
262 #endif | |
263 }; | |
264 | |
806
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
265 // A SkipGCALot object is used to elide the usual effect of gc-a-lot |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
266 // over a section of execution by a thread. Currently, it's used only to |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
267 // prevent re-entrant calls to GC. |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
268 class SkipGCALot : public StackObj { |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
269 private: |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
270 bool _saved; |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
271 Thread* _t; |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
272 |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
273 public: |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
274 #ifdef ASSERT |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
275 SkipGCALot(Thread* t) : _t(t) { |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
276 _saved = _t->skip_gcalot(); |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
277 _t->set_skip_gcalot(true); |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
278 } |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
279 |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
280 ~SkipGCALot() { |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
281 assert(_t->skip_gcalot(), "Save-restore protocol invariant"); |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
282 _t->set_skip_gcalot(_saved); |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
283 } |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
284 #else |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
285 SkipGCALot(Thread* t) { } |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
286 ~SkipGCALot() { } |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
287 #endif |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
288 }; |
821269eca479
6820167: GCALotAtAllSafepoints + FullGCALot(ScavengeALot) options crash JVM
ysr
parents:
196
diff
changeset
|
289 |
0 | 290 // JRT_LEAF currently can be called from either _thread_in_Java or |
291 // _thread_in_native mode. In _thread_in_native, it is ok | |
292 // for another thread to trigger GC. The rest of the JRT_LEAF | |
293 // rules apply. | |
294 class JRT_Leaf_Verifier : public No_Safepoint_Verifier { | |
295 static bool should_verify_GC(); | |
296 public: | |
297 #ifdef ASSERT | |
298 JRT_Leaf_Verifier(); | |
299 ~JRT_Leaf_Verifier(); | |
300 #else | |
301 JRT_Leaf_Verifier() {} | |
302 ~JRT_Leaf_Verifier() {} | |
303 #endif | |
304 }; | |
305 | |
306 // A No_Alloc_Verifier object can be placed in methods where one assumes that | |
307 // no allocation will occur. The destructor will verify this property | |
308 // unless the constructor is called with argument false (not activated). | |
309 // | |
310 // The check will only be done in debug mode and if activated. | |
311 // Note: this only makes sense at safepoints (otherwise, other threads may | |
312 // allocate concurrently.) | |
313 | |
314 class No_Alloc_Verifier : public StackObj { | |
315 private: | |
316 bool _activated; | |
317 | |
318 public: | |
319 #ifdef ASSERT | |
320 No_Alloc_Verifier(bool activated = true) { | |
321 _activated = activated; | |
322 if (_activated) Thread::current()->_allow_allocation_count++; | |
323 } | |
324 | |
325 ~No_Alloc_Verifier() { | |
326 if (_activated) Thread::current()->_allow_allocation_count--; | |
327 } | |
328 #else | |
329 No_Alloc_Verifier(bool activated = true) {} | |
330 ~No_Alloc_Verifier() {} | |
331 #endif | |
332 }; | |
1972 | 333 |
334 #endif // SHARE_VM_MEMORY_GCLOCKER_HPP |