Mercurial > hg > graal-jvmci-8
annotate src/share/vm/runtime/vm_operations.hpp @ 3979:4dfb2df418f2
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
author | johnc |
---|---|
date | Thu, 22 Sep 2011 10:57:37 -0700 |
parents | 1d1603768966 |
children | 94ec88ca68e2 |
rev | line source |
---|---|
0 | 1 /* |
2426
1d1603768966
7010070: Update all 2010 Oracle-changed OpenJDK files to have the proper copyright dates - second pass
trims
parents:
2177
diff
changeset
|
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP |
26 #define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP | |
27 | |
28 #include "classfile/javaClasses.hpp" | |
29 #include "memory/allocation.hpp" | |
30 #include "oops/oop.hpp" | |
31 #include "runtime/thread.hpp" | |
32 #include "utilities/top.hpp" | |
33 | |
0 | 34 // The following classes are used for operations |
35 // initiated by a Java thread but that must | |
36 // take place in the VMThread. | |
37 | |
38 #define VM_OP_ENUM(type) VMOp_##type, | |
39 | |
40 // Note: When new VM_XXX comes up, add 'XXX' to the template table. | |
41 #define VM_OPS_DO(template) \ | |
42 template(Dummy) \ | |
43 template(ThreadStop) \ | |
44 template(ThreadDump) \ | |
45 template(PrintThreads) \ | |
46 template(FindDeadlocks) \ | |
47 template(ForceSafepoint) \ | |
48 template(ForceAsyncSafepoint) \ | |
49 template(Deoptimize) \ | |
50 template(DeoptimizeFrame) \ | |
51 template(DeoptimizeAll) \ | |
52 template(ZombieAll) \ | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
53 template(UnlinkSymbols) \ |
1202 | 54 template(HandleFullCodeCache) \ |
0 | 55 template(Verify) \ |
56 template(PrintJNI) \ | |
57 template(HeapDumper) \ | |
58 template(DeoptimizeTheWorld) \ | |
59 template(GC_HeapInspection) \ | |
60 template(GenCollectFull) \ | |
61 template(GenCollectFullConcurrent) \ | |
62 template(GenCollectForAllocation) \ | |
139
c0492d52d55b
6539517: CR 6186200 should be extended to perm gen allocation to prevent spurious OOM's from perm gen
apetrusenko
parents:
0
diff
changeset
|
63 template(GenCollectForPermanentAllocation) \ |
0 | 64 template(ParallelGCFailedAllocation) \ |
65 template(ParallelGCFailedPermanentAllocation) \ | |
66 template(ParallelGCSystemGC) \ | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
67 template(CGC_Operation) \ |
0 | 68 template(CMS_Initial_Mark) \ |
69 template(CMS_Final_Remark) \ | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
70 template(G1CollectFull) \ |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
71 template(G1CollectForAllocation) \ |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
72 template(G1IncCollectionPause) \ |
0 | 73 template(EnableBiasedLocking) \ |
74 template(RevokeBias) \ | |
75 template(BulkRevokeBias) \ | |
76 template(PopulateDumpSharedSpace) \ | |
77 template(JNIFunctionTableCopier) \ | |
78 template(RedefineClasses) \ | |
79 template(GetOwnedMonitorInfo) \ | |
80 template(GetObjectMonitorUsage) \ | |
81 template(GetCurrentContendedMonitor) \ | |
82 template(GetStackTrace) \ | |
83 template(GetMultipleStackTraces) \ | |
84 template(GetAllStackTraces) \ | |
85 template(GetThreadListStackTraces) \ | |
86 template(GetFrameCount) \ | |
87 template(GetFrameLocation) \ | |
88 template(ChangeBreakpoints) \ | |
89 template(GetOrSetLocal) \ | |
90 template(GetCurrentLocation) \ | |
91 template(EnterInterpOnlyMode) \ | |
92 template(ChangeSingleStep) \ | |
93 template(HeapWalkOperation) \ | |
94 template(HeapIterateOperation) \ | |
95 template(ReportJavaOutOfMemory) \ | |
96 template(Exit) \ | |
97 | |
98 class VM_Operation: public CHeapObj { | |
99 public: | |
100 enum Mode { | |
101 _safepoint, // blocking, safepoint, vm_op C-heap allocated | |
102 _no_safepoint, // blocking, no safepoint, vm_op C-Heap allocated | |
103 _concurrent, // non-blocking, no safepoint, vm_op C-Heap allocated | |
104 _async_safepoint // non-blocking, safepoint, vm_op C-Heap allocated | |
105 }; | |
106 | |
107 enum VMOp_Type { | |
108 VM_OPS_DO(VM_OP_ENUM) | |
109 VMOp_Terminating | |
110 }; | |
111 | |
112 private: | |
113 Thread* _calling_thread; | |
114 ThreadPriority _priority; | |
115 long _timestamp; | |
116 VM_Operation* _next; | |
117 VM_Operation* _prev; | |
118 | |
119 // The VM operation name array | |
120 static const char* _names[]; | |
121 | |
122 public: | |
123 VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; } | |
124 virtual ~VM_Operation() {} | |
125 | |
126 // VM operation support (used by VM thread) | |
127 Thread* calling_thread() const { return _calling_thread; } | |
128 ThreadPriority priority() { return _priority; } | |
129 void set_calling_thread(Thread* thread, ThreadPriority priority); | |
130 | |
131 long timestamp() const { return _timestamp; } | |
132 void set_timestamp(long timestamp) { _timestamp = timestamp; } | |
133 | |
134 // Called by VM thread - does in turn invoke doit(). Do not override this | |
135 void evaluate(); | |
136 | |
137 // evaluate() is called by the VMThread and in turn calls doit(). | |
138 // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, | |
139 // doit_prologue() is called in that thread before transferring control to | |
140 // the VMThread. | |
141 // If doit_prologue() returns true the VM operation will proceed, and | |
142 // doit_epilogue() will be called by the JavaThread once the VM operation | |
143 // completes. If doit_prologue() returns false the VM operation is cancelled. | |
144 virtual void doit() = 0; | |
145 virtual bool doit_prologue() { return true; }; | |
146 virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent | |
147 | |
148 // Type test | |
149 virtual bool is_methodCompiler() const { return false; } | |
150 | |
151 // Linking | |
152 VM_Operation *next() const { return _next; } | |
153 VM_Operation *prev() const { return _prev; } | |
154 void set_next(VM_Operation *next) { _next = next; } | |
155 void set_prev(VM_Operation *prev) { _prev = prev; } | |
156 | |
157 // Configuration. Override these appropriatly in subclasses. | |
158 virtual VMOp_Type type() const = 0; | |
159 virtual Mode evaluation_mode() const { return _safepoint; } | |
160 virtual bool allow_nested_vm_operations() const { return false; } | |
161 virtual bool is_cheap_allocated() const { return false; } | |
162 virtual void oops_do(OopClosure* f) { /* do nothing */ }; | |
163 | |
164 // CAUTION: <don't hang yourself with following rope> | |
165 // If you override these methods, make sure that the evaluation | |
166 // of these methods is race-free and non-blocking, since these | |
167 // methods may be evaluated either by the mutators or by the | |
168 // vm thread, either concurrently with mutators or with the mutators | |
169 // stopped. In other words, taking locks is verboten, and if there | |
170 // are any races in evaluating the conditions, they'd better be benign. | |
171 virtual bool evaluate_at_safepoint() const { | |
172 return evaluation_mode() == _safepoint || | |
173 evaluation_mode() == _async_safepoint; | |
174 } | |
175 virtual bool evaluate_concurrently() const { | |
176 return evaluation_mode() == _concurrent || | |
177 evaluation_mode() == _async_safepoint; | |
178 } | |
179 | |
180 // Debugging | |
181 void print_on_error(outputStream* st) const; | |
182 const char* name() const { return _names[type()]; } | |
183 static const char* name(int type) { | |
184 assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); | |
185 return _names[type]; | |
186 } | |
187 #ifndef PRODUCT | |
188 void print_on(outputStream* st) const { print_on_error(st); } | |
189 #endif | |
190 }; | |
191 | |
192 class VM_ThreadStop: public VM_Operation { | |
193 private: | |
194 oop _thread; // The Thread that the Throwable is thrown against | |
195 oop _throwable; // The Throwable thrown at the target Thread | |
196 public: | |
197 // All oops are passed as JNI handles, since there is no guarantee that a GC might happen before the | |
198 // VM operation is executed. | |
199 VM_ThreadStop(oop thread, oop throwable) { | |
200 _thread = thread; | |
201 _throwable = throwable; | |
202 } | |
203 VMOp_Type type() const { return VMOp_ThreadStop; } | |
204 oop target_thread() const { return _thread; } | |
205 oop throwable() const { return _throwable;} | |
206 void doit(); | |
207 // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated | |
208 bool allow_nested_vm_operations() const { return true; } | |
209 Mode evaluation_mode() const { return _async_safepoint; } | |
210 bool is_cheap_allocated() const { return true; } | |
211 | |
212 // GC support | |
213 void oops_do(OopClosure* f) { | |
214 f->do_oop(&_thread); f->do_oop(&_throwable); | |
215 } | |
216 }; | |
217 | |
218 // dummy vm op, evaluated just to force a safepoint | |
219 class VM_ForceSafepoint: public VM_Operation { | |
220 public: | |
221 VM_ForceSafepoint() {} | |
222 void doit() {} | |
223 VMOp_Type type() const { return VMOp_ForceSafepoint; } | |
224 }; | |
225 | |
226 // dummy vm op, evaluated just to force a safepoint | |
227 class VM_ForceAsyncSafepoint: public VM_Operation { | |
228 public: | |
229 VM_ForceAsyncSafepoint() {} | |
230 void doit() {} | |
231 VMOp_Type type() const { return VMOp_ForceAsyncSafepoint; } | |
232 Mode evaluation_mode() const { return _async_safepoint; } | |
233 bool is_cheap_allocated() const { return true; } | |
234 }; | |
235 | |
236 class VM_Deoptimize: public VM_Operation { | |
237 public: | |
238 VM_Deoptimize() {} | |
239 VMOp_Type type() const { return VMOp_Deoptimize; } | |
240 void doit(); | |
241 bool allow_nested_vm_operations() const { return true; } | |
242 }; | |
243 | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
244 |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
245 // Deopt helper that can deoptimize frames in threads other than the |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
246 // current thread. Only used through Deoptimization::deoptimize_frame. |
0 | 247 class VM_DeoptimizeFrame: public VM_Operation { |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
248 friend class Deoptimization; |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
249 |
0 | 250 private: |
251 JavaThread* _thread; | |
252 intptr_t* _id; | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
253 VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id); |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
254 |
0 | 255 public: |
256 VMOp_Type type() const { return VMOp_DeoptimizeFrame; } | |
257 void doit(); | |
258 bool allow_nested_vm_operations() const { return true; } | |
259 }; | |
260 | |
1202 | 261 class VM_HandleFullCodeCache: public VM_Operation { |
262 private: | |
263 bool _is_full; | |
264 public: | |
265 VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; } | |
266 VMOp_Type type() const { return VMOp_HandleFullCodeCache; } | |
267 void doit(); | |
268 bool allow_nested_vm_operations() const { return true; } | |
269 }; | |
270 | |
0 | 271 #ifndef PRODUCT |
272 class VM_DeoptimizeAll: public VM_Operation { | |
273 private: | |
274 KlassHandle _dependee; | |
275 public: | |
276 VM_DeoptimizeAll() {} | |
277 VMOp_Type type() const { return VMOp_DeoptimizeAll; } | |
278 void doit(); | |
279 bool allow_nested_vm_operations() const { return true; } | |
280 }; | |
281 | |
282 | |
283 class VM_ZombieAll: public VM_Operation { | |
284 public: | |
285 VM_ZombieAll() {} | |
286 VMOp_Type type() const { return VMOp_ZombieAll; } | |
287 void doit(); | |
288 bool allow_nested_vm_operations() const { return true; } | |
289 }; | |
290 #endif // PRODUCT | |
291 | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
292 class VM_UnlinkSymbols: public VM_Operation { |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
293 public: |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
294 VM_UnlinkSymbols() {} |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
295 VMOp_Type type() const { return VMOp_UnlinkSymbols; } |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
296 void doit(); |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
297 bool allow_nested_vm_operations() const { return true; } |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
298 }; |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
299 |
0 | 300 class VM_Verify: public VM_Operation { |
301 private: | |
302 KlassHandle _dependee; | |
303 public: | |
304 VM_Verify() {} | |
305 VMOp_Type type() const { return VMOp_Verify; } | |
306 void doit(); | |
307 }; | |
308 | |
309 | |
310 class VM_PrintThreads: public VM_Operation { | |
311 private: | |
312 outputStream* _out; | |
313 bool _print_concurrent_locks; | |
314 public: | |
315 VM_PrintThreads() { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; } | |
316 VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; } | |
317 VMOp_Type type() const { return VMOp_PrintThreads; } | |
318 void doit(); | |
319 bool doit_prologue(); | |
320 void doit_epilogue(); | |
321 }; | |
322 | |
323 class VM_PrintJNI: public VM_Operation { | |
324 private: | |
325 outputStream* _out; | |
326 public: | |
327 VM_PrintJNI() { _out = tty; } | |
328 VM_PrintJNI(outputStream* out) { _out = out; } | |
329 VMOp_Type type() const { return VMOp_PrintJNI; } | |
330 void doit(); | |
331 }; | |
332 | |
333 class DeadlockCycle; | |
334 class VM_FindDeadlocks: public VM_Operation { | |
335 private: | |
336 bool _concurrent_locks; | |
337 DeadlockCycle* _deadlocks; | |
338 outputStream* _out; | |
339 | |
340 public: | |
341 VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {}; | |
342 VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {}; | |
343 ~VM_FindDeadlocks(); | |
344 | |
345 DeadlockCycle* result() { return _deadlocks; }; | |
346 VMOp_Type type() const { return VMOp_FindDeadlocks; } | |
347 void doit(); | |
348 bool doit_prologue(); | |
349 }; | |
350 | |
351 class ThreadDumpResult; | |
352 class ThreadSnapshot; | |
353 class ThreadConcurrentLocks; | |
354 | |
355 class VM_ThreadDump : public VM_Operation { | |
356 private: | |
357 ThreadDumpResult* _result; | |
358 int _num_threads; | |
359 GrowableArray<instanceHandle>* _threads; | |
360 int _max_depth; | |
361 bool _with_locked_monitors; | |
362 bool _with_locked_synchronizers; | |
363 | |
364 ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl); | |
365 | |
366 public: | |
367 VM_ThreadDump(ThreadDumpResult* result, | |
368 int max_depth, // -1 indicates entire stack | |
369 bool with_locked_monitors, | |
370 bool with_locked_synchronizers); | |
371 | |
372 VM_ThreadDump(ThreadDumpResult* result, | |
373 GrowableArray<instanceHandle>* threads, | |
374 int num_threads, // -1 indicates entire stack | |
375 int max_depth, | |
376 bool with_locked_monitors, | |
377 bool with_locked_synchronizers); | |
378 | |
379 VMOp_Type type() const { return VMOp_ThreadDump; } | |
380 void doit(); | |
381 bool doit_prologue(); | |
382 void doit_epilogue(); | |
383 }; | |
384 | |
385 | |
386 class VM_Exit: public VM_Operation { | |
387 private: | |
388 int _exit_code; | |
389 static volatile bool _vm_exited; | |
390 static Thread * _shutdown_thread; | |
391 static void wait_if_vm_exited(); | |
392 public: | |
393 VM_Exit(int exit_code) { | |
394 _exit_code = exit_code; | |
395 } | |
396 static int wait_for_threads_in_native_to_block(); | |
397 static int set_vm_exited(); | |
398 static bool vm_exited() { return _vm_exited; } | |
399 static void block_if_vm_exited() { | |
400 if (_vm_exited) { | |
401 wait_if_vm_exited(); | |
402 } | |
403 } | |
404 VMOp_Type type() const { return VMOp_Exit; } | |
405 void doit(); | |
406 }; | |
1972 | 407 |
408 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP |