Mercurial > hg > truffle
annotate src/share/vm/runtime/vm_operations.hpp @ 9126:bc26f978b0ce
HotSpotResolvedObjectType: implement hasFinalizeSubclass() correctly
don't use the (wrong) cached value, but ask the runtime on each request.
Fixes regression on xml.* benchmarks @ specjvm2008. The problem was:
After the constructor of Object was deoptimized due to an assumption violation,
it was recompiled again after some time. However, on recompilation, the value
of hasFinalizeSubclass for the class was not updated and it was compiled again
with a now-wrong assumption, which then triggered deoptimization again.
This was repeated until it hit the recompilation limit (defined by
PerMethodRecompilationCutoff), after which the method was executed only by
the interpreter, causing the performance regression.
author | Bernhard Urban <bernhard.urban@jku.at> |
---|---|
date | Mon, 15 Apr 2013 19:54:58 +0200 |
parents | 89e4d67fdd2a |
children | 836a62f43af9 |
rev | line source |
---|---|
0 | 1 /* |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1202
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP |
26 #define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP | |
27 | |
28 #include "classfile/javaClasses.hpp" | |
29 #include "memory/allocation.hpp" | |
30 #include "oops/oop.hpp" | |
31 #include "runtime/thread.hpp" | |
32 #include "utilities/top.hpp" | |
33 | |
0 | 34 // The following classes are used for operations |
35 // initiated by a Java thread but that must | |
36 // take place in the VMThread. | |
37 | |
38 #define VM_OP_ENUM(type) VMOp_##type, | |
39 | |
40 // Note: When new VM_XXX comes up, add 'XXX' to the template table. | |
41 #define VM_OPS_DO(template) \ | |
42 template(Dummy) \ | |
43 template(ThreadStop) \ | |
44 template(ThreadDump) \ | |
45 template(PrintThreads) \ | |
46 template(FindDeadlocks) \ | |
47 template(ForceSafepoint) \ | |
48 template(ForceAsyncSafepoint) \ | |
49 template(Deoptimize) \ | |
50 template(DeoptimizeFrame) \ | |
51 template(DeoptimizeAll) \ | |
52 template(ZombieAll) \ | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
53 template(UnlinkSymbols) \ |
1202 | 54 template(HandleFullCodeCache) \ |
0 | 55 template(Verify) \ |
56 template(PrintJNI) \ | |
57 template(HeapDumper) \ | |
58 template(DeoptimizeTheWorld) \ | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
59 template(CollectForMetadataAllocation) \ |
0 | 60 template(GC_HeapInspection) \ |
61 template(GenCollectFull) \ | |
62 template(GenCollectFullConcurrent) \ | |
63 template(GenCollectForAllocation) \ | |
64 template(ParallelGCFailedAllocation) \ | |
65 template(ParallelGCSystemGC) \ | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
66 template(CGC_Operation) \ |
0 | 67 template(CMS_Initial_Mark) \ |
68 template(CMS_Final_Remark) \ | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
69 template(G1CollectFull) \ |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
70 template(G1CollectForAllocation) \ |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
139
diff
changeset
|
71 template(G1IncCollectionPause) \ |
0 | 72 template(EnableBiasedLocking) \ |
73 template(RevokeBias) \ | |
74 template(BulkRevokeBias) \ | |
75 template(PopulateDumpSharedSpace) \ | |
76 template(JNIFunctionTableCopier) \ | |
77 template(RedefineClasses) \ | |
78 template(GetOwnedMonitorInfo) \ | |
79 template(GetObjectMonitorUsage) \ | |
80 template(GetCurrentContendedMonitor) \ | |
81 template(GetStackTrace) \ | |
82 template(GetMultipleStackTraces) \ | |
83 template(GetAllStackTraces) \ | |
84 template(GetThreadListStackTraces) \ | |
85 template(GetFrameCount) \ | |
86 template(GetFrameLocation) \ | |
87 template(ChangeBreakpoints) \ | |
88 template(GetOrSetLocal) \ | |
89 template(GetCurrentLocation) \ | |
90 template(EnterInterpOnlyMode) \ | |
91 template(ChangeSingleStep) \ | |
92 template(HeapWalkOperation) \ | |
93 template(HeapIterateOperation) \ | |
94 template(ReportJavaOutOfMemory) \ | |
4800
94ec88ca68e2
7115199: Add event tracing hooks and Java Flight Recorder infrastructure
phh
parents:
2426
diff
changeset
|
95 template(JFRCheckpoint) \ |
0 | 96 template(Exit) \ |
8710
9058789475af
7107135: Stack guard pages are no more protected after loading a shared library with executable stack
iklam
parents:
6725
diff
changeset
|
97 template(LinuxDllLoad) \ |
0 | 98 |
6197 | 99 class VM_Operation: public CHeapObj<mtInternal> { |
0 | 100 public: |
101 enum Mode { | |
102 _safepoint, // blocking, safepoint, vm_op C-heap allocated | |
103 _no_safepoint, // blocking, no safepoint, vm_op C-Heap allocated | |
104 _concurrent, // non-blocking, no safepoint, vm_op C-Heap allocated | |
105 _async_safepoint // non-blocking, safepoint, vm_op C-Heap allocated | |
106 }; | |
107 | |
108 enum VMOp_Type { | |
109 VM_OPS_DO(VM_OP_ENUM) | |
110 VMOp_Terminating | |
111 }; | |
112 | |
113 private: | |
114 Thread* _calling_thread; | |
115 ThreadPriority _priority; | |
116 long _timestamp; | |
117 VM_Operation* _next; | |
118 VM_Operation* _prev; | |
119 | |
120 // The VM operation name array | |
121 static const char* _names[]; | |
122 | |
123 public: | |
124 VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; } | |
125 virtual ~VM_Operation() {} | |
126 | |
127 // VM operation support (used by VM thread) | |
128 Thread* calling_thread() const { return _calling_thread; } | |
129 ThreadPriority priority() { return _priority; } | |
130 void set_calling_thread(Thread* thread, ThreadPriority priority); | |
131 | |
132 long timestamp() const { return _timestamp; } | |
133 void set_timestamp(long timestamp) { _timestamp = timestamp; } | |
134 | |
135 // Called by VM thread - does in turn invoke doit(). Do not override this | |
136 void evaluate(); | |
137 | |
138 // evaluate() is called by the VMThread and in turn calls doit(). | |
139 // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, | |
140 // doit_prologue() is called in that thread before transferring control to | |
141 // the VMThread. | |
142 // If doit_prologue() returns true the VM operation will proceed, and | |
143 // doit_epilogue() will be called by the JavaThread once the VM operation | |
144 // completes. If doit_prologue() returns false the VM operation is cancelled. | |
145 virtual void doit() = 0; | |
146 virtual bool doit_prologue() { return true; }; | |
147 virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent | |
148 | |
149 // Type test | |
150 virtual bool is_methodCompiler() const { return false; } | |
151 | |
152 // Linking | |
153 VM_Operation *next() const { return _next; } | |
154 VM_Operation *prev() const { return _prev; } | |
155 void set_next(VM_Operation *next) { _next = next; } | |
156 void set_prev(VM_Operation *prev) { _prev = prev; } | |
157 | |
158 // Configuration. Override these appropriatly in subclasses. | |
159 virtual VMOp_Type type() const = 0; | |
160 virtual Mode evaluation_mode() const { return _safepoint; } | |
161 virtual bool allow_nested_vm_operations() const { return false; } | |
162 virtual bool is_cheap_allocated() const { return false; } | |
163 virtual void oops_do(OopClosure* f) { /* do nothing */ }; | |
164 | |
165 // CAUTION: <don't hang yourself with following rope> | |
166 // If you override these methods, make sure that the evaluation | |
167 // of these methods is race-free and non-blocking, since these | |
168 // methods may be evaluated either by the mutators or by the | |
169 // vm thread, either concurrently with mutators or with the mutators | |
170 // stopped. In other words, taking locks is verboten, and if there | |
171 // are any races in evaluating the conditions, they'd better be benign. | |
172 virtual bool evaluate_at_safepoint() const { | |
173 return evaluation_mode() == _safepoint || | |
174 evaluation_mode() == _async_safepoint; | |
175 } | |
176 virtual bool evaluate_concurrently() const { | |
177 return evaluation_mode() == _concurrent || | |
178 evaluation_mode() == _async_safepoint; | |
179 } | |
180 | |
181 // Debugging | |
182 void print_on_error(outputStream* st) const; | |
183 const char* name() const { return _names[type()]; } | |
184 static const char* name(int type) { | |
185 assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); | |
186 return _names[type]; | |
187 } | |
188 #ifndef PRODUCT | |
189 void print_on(outputStream* st) const { print_on_error(st); } | |
190 #endif | |
191 }; | |
192 | |
193 class VM_ThreadStop: public VM_Operation { | |
194 private: | |
195 oop _thread; // The Thread that the Throwable is thrown against | |
196 oop _throwable; // The Throwable thrown at the target Thread | |
197 public: | |
198 // All oops are passed as JNI handles, since there is no guarantee that a GC might happen before the | |
199 // VM operation is executed. | |
200 VM_ThreadStop(oop thread, oop throwable) { | |
201 _thread = thread; | |
202 _throwable = throwable; | |
203 } | |
204 VMOp_Type type() const { return VMOp_ThreadStop; } | |
205 oop target_thread() const { return _thread; } | |
206 oop throwable() const { return _throwable;} | |
207 void doit(); | |
208 // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated | |
209 bool allow_nested_vm_operations() const { return true; } | |
210 Mode evaluation_mode() const { return _async_safepoint; } | |
211 bool is_cheap_allocated() const { return true; } | |
212 | |
213 // GC support | |
214 void oops_do(OopClosure* f) { | |
215 f->do_oop(&_thread); f->do_oop(&_throwable); | |
216 } | |
217 }; | |
218 | |
219 // dummy vm op, evaluated just to force a safepoint | |
220 class VM_ForceSafepoint: public VM_Operation { | |
221 public: | |
222 VM_ForceSafepoint() {} | |
223 void doit() {} | |
224 VMOp_Type type() const { return VMOp_ForceSafepoint; } | |
225 }; | |
226 | |
227 // dummy vm op, evaluated just to force a safepoint | |
228 class VM_ForceAsyncSafepoint: public VM_Operation { | |
229 public: | |
230 VM_ForceAsyncSafepoint() {} | |
231 void doit() {} | |
232 VMOp_Type type() const { return VMOp_ForceAsyncSafepoint; } | |
233 Mode evaluation_mode() const { return _async_safepoint; } | |
234 bool is_cheap_allocated() const { return true; } | |
235 }; | |
236 | |
237 class VM_Deoptimize: public VM_Operation { | |
238 public: | |
239 VM_Deoptimize() {} | |
240 VMOp_Type type() const { return VMOp_Deoptimize; } | |
241 void doit(); | |
242 bool allow_nested_vm_operations() const { return true; } | |
243 }; | |
244 | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
245 |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
246 // Deopt helper that can deoptimize frames in threads other than the |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
247 // current thread. Only used through Deoptimization::deoptimize_frame. |
0 | 248 class VM_DeoptimizeFrame: public VM_Operation { |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
249 friend class Deoptimization; |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
250 |
0 | 251 private: |
252 JavaThread* _thread; | |
253 intptr_t* _id; | |
5110
0ebca2e35ca5
more preparations for disabling runtime feedback selectively based on deoptimization history
Christian Haeubl <christian.haeubl@oracle.com>
parents:
4800
diff
changeset
|
254 int _reason; |
0ebca2e35ca5
more preparations for disabling runtime feedback selectively based on deoptimization history
Christian Haeubl <christian.haeubl@oracle.com>
parents:
4800
diff
changeset
|
255 VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason); |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1552
diff
changeset
|
256 |
0 | 257 public: |
258 VMOp_Type type() const { return VMOp_DeoptimizeFrame; } | |
259 void doit(); | |
260 bool allow_nested_vm_operations() const { return true; } | |
261 }; | |
262 | |
1202 | 263 class VM_HandleFullCodeCache: public VM_Operation { |
264 private: | |
265 bool _is_full; | |
266 public: | |
267 VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; } | |
268 VMOp_Type type() const { return VMOp_HandleFullCodeCache; } | |
269 void doit(); | |
270 bool allow_nested_vm_operations() const { return true; } | |
271 }; | |
272 | |
0 | 273 #ifndef PRODUCT |
274 class VM_DeoptimizeAll: public VM_Operation { | |
275 private: | |
276 KlassHandle _dependee; | |
277 public: | |
278 VM_DeoptimizeAll() {} | |
279 VMOp_Type type() const { return VMOp_DeoptimizeAll; } | |
280 void doit(); | |
281 bool allow_nested_vm_operations() const { return true; } | |
282 }; | |
283 | |
284 | |
285 class VM_ZombieAll: public VM_Operation { | |
286 public: | |
287 VM_ZombieAll() {} | |
288 VMOp_Type type() const { return VMOp_ZombieAll; } | |
289 void doit(); | |
290 bool allow_nested_vm_operations() const { return true; } | |
291 }; | |
292 #endif // PRODUCT | |
293 | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
294 class VM_UnlinkSymbols: public VM_Operation { |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
295 public: |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
296 VM_UnlinkSymbols() {} |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
297 VMOp_Type type() const { return VMOp_UnlinkSymbols; } |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
298 void doit(); |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
299 bool allow_nested_vm_operations() const { return true; } |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
300 }; |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1972
diff
changeset
|
301 |
0 | 302 class VM_Verify: public VM_Operation { |
303 private: | |
9071
68fe50d4f1d5
8011343: Add new flag for verifying the heap during startup
johnc
parents:
8710
diff
changeset
|
304 bool _silent; |
0 | 305 public: |
9071
68fe50d4f1d5
8011343: Add new flag for verifying the heap during startup
johnc
parents:
8710
diff
changeset
|
306 VM_Verify(bool silent) : _silent(silent) {} |
0 | 307 VMOp_Type type() const { return VMOp_Verify; } |
308 void doit(); | |
309 }; | |
310 | |
311 | |
312 class VM_PrintThreads: public VM_Operation { | |
313 private: | |
314 outputStream* _out; | |
315 bool _print_concurrent_locks; | |
316 public: | |
317 VM_PrintThreads() { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; } | |
318 VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; } | |
319 VMOp_Type type() const { return VMOp_PrintThreads; } | |
320 void doit(); | |
321 bool doit_prologue(); | |
322 void doit_epilogue(); | |
323 }; | |
324 | |
325 class VM_PrintJNI: public VM_Operation { | |
326 private: | |
327 outputStream* _out; | |
328 public: | |
329 VM_PrintJNI() { _out = tty; } | |
330 VM_PrintJNI(outputStream* out) { _out = out; } | |
331 VMOp_Type type() const { return VMOp_PrintJNI; } | |
332 void doit(); | |
333 }; | |
334 | |
335 class DeadlockCycle; | |
336 class VM_FindDeadlocks: public VM_Operation { | |
337 private: | |
338 bool _concurrent_locks; | |
339 DeadlockCycle* _deadlocks; | |
340 outputStream* _out; | |
341 | |
342 public: | |
343 VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {}; | |
344 VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {}; | |
345 ~VM_FindDeadlocks(); | |
346 | |
347 DeadlockCycle* result() { return _deadlocks; }; | |
348 VMOp_Type type() const { return VMOp_FindDeadlocks; } | |
349 void doit(); | |
350 bool doit_prologue(); | |
351 }; | |
352 | |
353 class ThreadDumpResult; | |
354 class ThreadSnapshot; | |
355 class ThreadConcurrentLocks; | |
356 | |
357 class VM_ThreadDump : public VM_Operation { | |
358 private: | |
359 ThreadDumpResult* _result; | |
360 int _num_threads; | |
361 GrowableArray<instanceHandle>* _threads; | |
362 int _max_depth; | |
363 bool _with_locked_monitors; | |
364 bool _with_locked_synchronizers; | |
365 | |
366 ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl); | |
367 | |
368 public: | |
369 VM_ThreadDump(ThreadDumpResult* result, | |
370 int max_depth, // -1 indicates entire stack | |
371 bool with_locked_monitors, | |
372 bool with_locked_synchronizers); | |
373 | |
374 VM_ThreadDump(ThreadDumpResult* result, | |
375 GrowableArray<instanceHandle>* threads, | |
376 int num_threads, // -1 indicates entire stack | |
377 int max_depth, | |
378 bool with_locked_monitors, | |
379 bool with_locked_synchronizers); | |
380 | |
381 VMOp_Type type() const { return VMOp_ThreadDump; } | |
382 void doit(); | |
383 bool doit_prologue(); | |
384 void doit_epilogue(); | |
385 }; | |
386 | |
387 | |
388 class VM_Exit: public VM_Operation { | |
389 private: | |
390 int _exit_code; | |
391 static volatile bool _vm_exited; | |
392 static Thread * _shutdown_thread; | |
393 static void wait_if_vm_exited(); | |
394 public: | |
395 VM_Exit(int exit_code) { | |
396 _exit_code = exit_code; | |
397 } | |
398 static int wait_for_threads_in_native_to_block(); | |
399 static int set_vm_exited(); | |
400 static bool vm_exited() { return _vm_exited; } | |
401 static void block_if_vm_exited() { | |
402 if (_vm_exited) { | |
403 wait_if_vm_exited(); | |
404 } | |
405 } | |
406 VMOp_Type type() const { return VMOp_Exit; } | |
407 void doit(); | |
408 }; | |
1972 | 409 |
410 #endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP |