Mercurial > hg > truffle
annotate src/share/vm/runtime/deoptimization.cpp @ 4724:0841c0ec2ed6
7123810: new hotspot build - hs23-b10
Reviewed-by: jcoomes
author | amurillo |
---|---|
date | Fri, 23 Dec 2011 15:29:34 -0800 |
parents | e342a5110bed |
children | 04b9a2566eec e9a5e0a812c8 |
rev | line source |
---|---|
0 | 1 /* |
2142 | 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1255
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1255
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1255
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/systemDictionary.hpp" | |
27 #include "code/debugInfoRec.hpp" | |
28 #include "code/nmethod.hpp" | |
29 #include "code/pcDesc.hpp" | |
30 #include "code/scopeDesc.hpp" | |
31 #include "interpreter/bytecode.hpp" | |
32 #include "interpreter/interpreter.hpp" | |
33 #include "interpreter/oopMapCache.hpp" | |
34 #include "memory/allocation.inline.hpp" | |
35 #include "memory/oopFactory.hpp" | |
36 #include "memory/resourceArea.hpp" | |
37 #include "oops/methodOop.hpp" | |
38 #include "oops/oop.inline.hpp" | |
39 #include "prims/jvmtiThreadState.hpp" | |
40 #include "runtime/biasedLocking.hpp" | |
41 #include "runtime/compilationPolicy.hpp" | |
42 #include "runtime/deoptimization.hpp" | |
43 #include "runtime/interfaceSupport.hpp" | |
44 #include "runtime/sharedRuntime.hpp" | |
45 #include "runtime/signature.hpp" | |
46 #include "runtime/stubRoutines.hpp" | |
47 #include "runtime/thread.hpp" | |
48 #include "runtime/vframe.hpp" | |
49 #include "runtime/vframeArray.hpp" | |
50 #include "runtime/vframe_hp.hpp" | |
51 #include "utilities/events.hpp" | |
52 #include "utilities/xmlstream.hpp" | |
53 #ifdef TARGET_ARCH_x86 | |
54 # include "vmreg_x86.inline.hpp" | |
55 #endif | |
56 #ifdef TARGET_ARCH_sparc | |
57 # include "vmreg_sparc.inline.hpp" | |
58 #endif | |
59 #ifdef TARGET_ARCH_zero | |
60 # include "vmreg_zero.inline.hpp" | |
61 #endif | |
2192
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
62 #ifdef TARGET_ARCH_arm |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
63 # include "vmreg_arm.inline.hpp" |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
64 #endif |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
65 #ifdef TARGET_ARCH_ppc |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
66 # include "vmreg_ppc.inline.hpp" |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
67 #endif |
1972 | 68 #ifdef COMPILER2 |
69 #ifdef TARGET_ARCH_MODEL_x86_32 | |
70 # include "adfiles/ad_x86_32.hpp" | |
71 #endif | |
72 #ifdef TARGET_ARCH_MODEL_x86_64 | |
73 # include "adfiles/ad_x86_64.hpp" | |
74 #endif | |
75 #ifdef TARGET_ARCH_MODEL_sparc | |
76 # include "adfiles/ad_sparc.hpp" | |
77 #endif | |
78 #ifdef TARGET_ARCH_MODEL_zero | |
79 # include "adfiles/ad_zero.hpp" | |
80 #endif | |
2192
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
81 #ifdef TARGET_ARCH_MODEL_arm |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
82 # include "adfiles/ad_arm.hpp" |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
83 #endif |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
84 #ifdef TARGET_ARCH_MODEL_ppc |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
85 # include "adfiles/ad_ppc.hpp" |
b92c45f2bc75
7016023: Enable building ARM and PPC from src/closed repository
bobv
parents:
2177
diff
changeset
|
86 #endif |
1972 | 87 #endif |
0 | 88 |
89 bool DeoptimizationMarker::_is_active = false; | |
90 | |
91 Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame, | |
92 int caller_adjustment, | |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
93 int caller_actual_parameters, |
0 | 94 int number_of_frames, |
95 intptr_t* frame_sizes, | |
96 address* frame_pcs, | |
97 BasicType return_type) { | |
98 _size_of_deoptimized_frame = size_of_deoptimized_frame; | |
99 _caller_adjustment = caller_adjustment; | |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
100 _caller_actual_parameters = caller_actual_parameters; |
0 | 101 _number_of_frames = number_of_frames; |
102 _frame_sizes = frame_sizes; | |
103 _frame_pcs = frame_pcs; | |
104 _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2); | |
105 _return_type = return_type; | |
3931
5432047c7db7
7087445: Improve platform independence of JSR292 shared code
bdelsart
parents:
3369
diff
changeset
|
106 _initial_info = 0; |
0 | 107 // PD (x86 only) |
108 _counter_temp = 0; | |
109 _unpack_kind = 0; | |
110 _sender_sp_temp = 0; | |
111 | |
112 _total_frame_sizes = size_of_frames(); | |
113 } | |
114 | |
115 | |
116 Deoptimization::UnrollBlock::~UnrollBlock() { | |
117 FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes); | |
118 FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs); | |
119 FREE_C_HEAP_ARRAY(intptr_t, _register_block); | |
120 } | |
121 | |
122 | |
123 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const { | |
124 assert(register_number < RegisterMap::reg_count, "checking register number"); | |
125 return &_register_block[register_number * 2]; | |
126 } | |
127 | |
128 | |
129 | |
130 int Deoptimization::UnrollBlock::size_of_frames() const { | |
131 // Acount first for the adjustment of the initial frame | |
132 int result = _caller_adjustment; | |
133 for (int index = 0; index < number_of_frames(); index++) { | |
134 result += frame_sizes()[index]; | |
135 } | |
136 return result; | |
137 } | |
138 | |
139 | |
140 void Deoptimization::UnrollBlock::print() { | |
141 ttyLocker ttyl; | |
142 tty->print_cr("UnrollBlock"); | |
143 tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame); | |
144 tty->print( " frame_sizes: "); | |
145 for (int index = 0; index < number_of_frames(); index++) { | |
146 tty->print("%d ", frame_sizes()[index]); | |
147 } | |
148 tty->cr(); | |
149 } | |
150 | |
151 | |
152 // In order to make fetch_unroll_info work properly with escape | |
153 // analysis, The method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and | |
154 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation | |
155 // of previously eliminated objects occurs in realloc_objects, which is | |
156 // called from the method fetch_unroll_info_helper below. | |
157 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread)) | |
158 // It is actually ok to allocate handles in a leaf method. It causes no safepoints, | |
159 // but makes the entry a little slower. There is however a little dance we have to | |
160 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro | |
161 | |
162 // fetch_unroll_info() is called at the beginning of the deoptimization | |
163 // handler. Note this fact before we start generating temporary frames | |
164 // that can confuse an asynchronous stack walker. This counter is | |
165 // decremented at the end of unpack_frames(). | |
166 thread->inc_in_deopt_handler(); | |
167 | |
168 return fetch_unroll_info_helper(thread); | |
169 JRT_END | |
170 | |
171 | |
172 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap) | |
173 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) { | |
174 | |
175 // Note: there is a safepoint safety issue here. No matter whether we enter | |
176 // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once | |
177 // the vframeArray is created. | |
178 // | |
179 | |
180 // Allocate our special deoptimization ResourceMark | |
181 DeoptResourceMark* dmark = new DeoptResourceMark(thread); | |
182 assert(thread->deopt_mark() == NULL, "Pending deopt!"); | |
183 thread->set_deopt_mark(dmark); | |
184 | |
185 frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect | |
186 RegisterMap map(thread, true); | |
187 RegisterMap dummy_map(thread, false); | |
188 // Now get the deoptee with a valid map | |
189 frame deoptee = stub_frame.sender(&map); | |
1814
fd5d4527cdf5
6986270: guarantee(*bcp != Bytecodes::_monitorenter || exec_mode != Deoptimization::Unpack_exception) fails
iveresov
parents:
1783
diff
changeset
|
190 // Set the deoptee nmethod |
fd5d4527cdf5
6986270: guarantee(*bcp != Bytecodes::_monitorenter || exec_mode != Deoptimization::Unpack_exception) fails
iveresov
parents:
1783
diff
changeset
|
191 assert(thread->deopt_nmethod() == NULL, "Pending deopt!"); |
fd5d4527cdf5
6986270: guarantee(*bcp != Bytecodes::_monitorenter || exec_mode != Deoptimization::Unpack_exception) fails
iveresov
parents:
1783
diff
changeset
|
192 thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null()); |
0 | 193 |
3336
2e038ad0c1d0
7009361: JSR 292 Invalid value on stack on solaris-sparc with -Xcomp
never
parents:
2338
diff
changeset
|
194 if (VerifyStack) { |
2e038ad0c1d0
7009361: JSR 292 Invalid value on stack on solaris-sparc with -Xcomp
never
parents:
2338
diff
changeset
|
195 thread->validate_frame_layout(); |
2e038ad0c1d0
7009361: JSR 292 Invalid value on stack on solaris-sparc with -Xcomp
never
parents:
2338
diff
changeset
|
196 } |
2e038ad0c1d0
7009361: JSR 292 Invalid value on stack on solaris-sparc with -Xcomp
never
parents:
2338
diff
changeset
|
197 |
0 | 198 // Create a growable array of VFrames where each VFrame represents an inlined |
199 // Java frame. This storage is allocated with the usual system arena. | |
200 assert(deoptee.is_compiled_frame(), "Wrong frame type"); | |
201 GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10); | |
202 vframe* vf = vframe::new_vframe(&deoptee, &map, thread); | |
203 while (!vf->is_top()) { | |
204 assert(vf->is_compiled_frame(), "Wrong frame type"); | |
205 chunk->push(compiledVFrame::cast(vf)); | |
206 vf = vf->sender(); | |
207 } | |
208 assert(vf->is_compiled_frame(), "Wrong frame type"); | |
209 chunk->push(compiledVFrame::cast(vf)); | |
210 | |
211 #ifdef COMPILER2 | |
212 // Reallocate the non-escaping objects and restore their fields. Then | |
213 // relock objects if synchronization on them was eliminated. | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
214 if (DoEscapeAnalysis) { |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
215 if (EliminateAllocations) { |
83
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
216 assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames"); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
217 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects(); |
1253
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
218 |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
219 // The flag return_oop() indicates call sites which return oop |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
220 // in compiled code. Such sites include java method calls, |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
221 // runtime calls (for example, used to allocate new objects/arrays |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
222 // on slow code path) and any other calls generated in compiled code. |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
223 // It is not guaranteed that we can get such information here only |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
224 // by analyzing bytecode in deoptimized frames. This is why this flag |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
225 // is set during method compilation (see Compile::Process_OopMap_Node()). |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
226 bool save_oop_result = chunk->at(0)->scope()->return_oop(); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
227 Handle return_value; |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
228 if (save_oop_result) { |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
229 // Reallocation may trigger GC. If deoptimization happened on return from |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
230 // call which returns oop we need to save it since it is not in oopmap. |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
231 oop result = deoptee.saved_oop_result(&map); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
232 assert(result == NULL || result->is_oop(), "must be oop"); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
233 return_value = Handle(thread, result); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
234 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer"); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
235 if (TraceDeoptimization) { |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
236 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread); |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
237 } |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
238 } |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
239 bool reallocated = false; |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
240 if (objects != NULL) { |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
241 JRT_BLOCK |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
242 reallocated = realloc_objects(thread, &deoptee, objects, THREAD); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
243 JRT_END |
0 | 244 } |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
245 if (reallocated) { |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
246 reassign_fields(&deoptee, &map, objects); |
0 | 247 #ifndef PRODUCT |
248 if (TraceDeoptimization) { | |
249 ttyLocker ttyl; | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
250 tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
251 print_objects(objects); |
1253
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
252 } |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
253 #endif |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
254 } |
1253
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
255 if (save_oop_result) { |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
256 // Restore result. |
f70b0d9ab095
6910618: C2: Error: assert(d->is_oop(),"JVM_ArrayCopy: dst not an oop")
kvn
parents:
1206
diff
changeset
|
257 deoptee.set_saved_oop_result(&map, return_value()); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
258 } |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
259 } |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
260 if (EliminateLocks) { |
83
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
261 #ifndef PRODUCT |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
262 bool first = true; |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
263 #endif |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
264 for (int i = 0; i < chunk->length(); i++) { |
83
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
265 compiledVFrame* cvf = chunk->at(i); |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
266 assert (cvf->scope() != NULL,"expect only compiled java frames"); |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
267 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
268 if (monitors->is_nonempty()) { |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
269 relock_objects(monitors, thread); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
270 #ifndef PRODUCT |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
271 if (TraceDeoptimization) { |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
272 ttyLocker ttyl; |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
273 for (int j = 0; j < monitors->length(); j++) { |
83
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
274 MonitorInfo* mi = monitors->at(j); |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
275 if (mi->eliminated()) { |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
276 if (first) { |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
277 first = false; |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
278 tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
279 } |
d3cd40645d0d
6681646: Relocking of a scalar replaced object during deoptimization is broken
kvn
parents:
44
diff
changeset
|
280 tty->print_cr(" object <" INTPTR_FORMAT "> locked", mi->owner()); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
281 } |
0 | 282 } |
283 } | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
284 #endif |
0 | 285 } |
286 } | |
287 } | |
288 } | |
289 #endif // COMPILER2 | |
290 // Ensure that no safepoint is taken after pointers have been stored | |
291 // in fields of rematerialized objects. If a safepoint occurs from here on | |
292 // out the java state residing in the vframeArray will be missed. | |
293 No_Safepoint_Verifier no_safepoint; | |
294 | |
295 vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk); | |
296 | |
297 assert(thread->vframe_array_head() == NULL, "Pending deopt!");; | |
298 thread->set_vframe_array_head(array); | |
299 | |
300 // Now that the vframeArray has been created if we have any deferred local writes | |
301 // added by jvmti then we can free up that structure as the data is now in the | |
302 // vframeArray | |
303 | |
304 if (thread->deferred_locals() != NULL) { | |
305 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals(); | |
306 int i = 0; | |
307 do { | |
308 // Because of inlining we could have multiple vframes for a single frame | |
309 // and several of the vframes could have deferred writes. Find them all. | |
310 if (list->at(i)->id() == array->original().id()) { | |
311 jvmtiDeferredLocalVariableSet* dlv = list->at(i); | |
312 list->remove_at(i); | |
313 // individual jvmtiDeferredLocalVariableSet are CHeapObj's | |
314 delete dlv; | |
315 } else { | |
316 i++; | |
317 } | |
318 } while ( i < list->length() ); | |
319 if (list->length() == 0) { | |
320 thread->set_deferred_locals(NULL); | |
321 // free the list and elements back to C heap. | |
322 delete list; | |
323 } | |
324 | |
325 } | |
326 | |
1692 | 327 #ifndef SHARK |
0 | 328 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info. |
329 CodeBlob* cb = stub_frame.cb(); | |
330 // Verify we have the right vframeArray | |
331 assert(cb->frame_size() >= 0, "Unexpected frame size"); | |
332 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size(); | |
333 | |
1204 | 334 // If the deopt call site is a MethodHandle invoke call site we have |
335 // to adjust the unpack_sp. | |
336 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null(); | |
337 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc())) | |
338 unpack_sp = deoptee.unextended_sp(); | |
339 | |
0 | 340 #ifdef ASSERT |
341 assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking"); | |
342 Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp); | |
343 #endif | |
1692 | 344 #else |
345 intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp(); | |
346 #endif // !SHARK | |
347 | |
0 | 348 // This is a guarantee instead of an assert because if vframe doesn't match |
349 // we will unpack the wrong deoptimized frame and wind up in strange places | |
350 // where it will be very difficult to figure out what went wrong. Better | |
351 // to die an early death here than some very obscure death later when the | |
352 // trail is cold. | |
353 // Note: on ia64 this guarantee can be fooled by frames with no memory stack | |
354 // in that it will fail to detect a problem when there is one. This needs | |
355 // more work in tiger timeframe. | |
356 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack"); | |
357 | |
358 int number_of_frames = array->frames(); | |
359 | |
360 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost | |
361 // virtual activation, which is the reverse of the elements in the vframes array. | |
362 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames); | |
363 // +1 because we always have an interpreter return address for the final slot. | |
364 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1); | |
365 int popframe_extra_args = 0; | |
366 // Create an interpreter return address for the stub to use as its return | |
367 // address so the skeletal frames are perfectly walkable | |
368 frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0); | |
369 | |
370 // PopFrame requires that the preserved incoming arguments from the recently-popped topmost | |
371 // activation be put back on the expression stack of the caller for reexecution | |
372 if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) { | |
373 popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words()); | |
374 } | |
375 | |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
376 // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
377 // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
378 // than simply use array->sender.pc(). This requires us to walk the current set of frames |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
379 // |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
380 frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
381 deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
382 |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
383 // It's possible that the number of paramters at the call site is |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
384 // different than number of arguments in the callee when method |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
385 // handles are used. If the caller is interpreted get the real |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
386 // value so that the proper amount of space can be added to it's |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
387 // frame. |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
388 bool caller_was_method_handle = false; |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
389 if (deopt_sender.is_interpreted_frame()) { |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
390 methodHandle method = deopt_sender.interpreter_frame_method(); |
4055
e342a5110bed
7106774: JSR 292: nightly test inlineMHTarget fails with wrong result
twisti
parents:
4042
diff
changeset
|
391 Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci()); |
e342a5110bed
7106774: JSR 292: nightly test inlineMHTarget fails with wrong result
twisti
parents:
4042
diff
changeset
|
392 if (cur.is_method_handle_invoke()) { |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
393 // Method handle invokes may involve fairly arbitrary chains of |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
394 // calls so it's impossible to know how much actual space the |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
395 // caller has for locals. |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
396 caller_was_method_handle = true; |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
397 } |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
398 } |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
399 |
0 | 400 // |
401 // frame_sizes/frame_pcs[0] oldest frame (int or c2i) | |
402 // frame_sizes/frame_pcs[1] next oldest frame (int) | |
403 // frame_sizes/frame_pcs[n] youngest frame (int) | |
404 // | |
405 // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame | |
406 // owns the space for the return address to it's caller). Confusing ain't it. | |
407 // | |
408 // The vframe array can address vframes with indices running from | |
409 // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame. | |
410 // When we create the skeletal frames we need the oldest frame to be in the zero slot | |
411 // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk. | |
412 // so things look a little strange in this loop. | |
413 // | |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
414 int callee_parameters = 0; |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
415 int callee_locals = 0; |
0 | 416 for (int index = 0; index < array->frames(); index++ ) { |
417 // frame[number_of_frames - 1 ] = on_stack_size(youngest) | |
418 // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest)) | |
419 // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest))) | |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
420 int caller_parms = callee_parameters; |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
421 if ((index == array->frames() - 1) && caller_was_method_handle) { |
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
422 caller_parms = 0; |
3369
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
423 } |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
424 frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms, |
3d2ab563047a
7043461: VM crashes in void LinkResolver::runtime_resolve_virtual_method
never
parents:
3346
diff
changeset
|
425 callee_parameters, |
0 | 426 callee_locals, |
427 index == 0, | |
428 popframe_extra_args); | |
429 // This pc doesn't have to be perfect just good enough to identify the frame | |
430 // as interpreted so the skeleton frame will be walkable | |
431 // The correct pc will be set when the skeleton frame is completely filled out | |
432 // The final pc we store in the loop is wrong and will be overwritten below | |
433 frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset; | |
434 | |
435 callee_parameters = array->element(index)->method()->size_of_parameters(); | |
436 callee_locals = array->element(index)->method()->max_locals(); | |
437 popframe_extra_args = 0; | |
438 } | |
439 | |
440 // Compute whether the root vframe returns a float or double value. | |
441 BasicType return_type; | |
442 { | |
443 HandleMark hm; | |
444 methodHandle method(thread, array->element(0)->method()); | |
2142 | 445 Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci()); |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2142
diff
changeset
|
446 return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL; |
0 | 447 } |
448 | |
449 // Compute information for handling adapters and adjusting the frame size of the caller. | |
450 int caller_adjustment = 0; | |
451 | |
452 // Compute the amount the oldest interpreter frame will have to adjust | |
453 // its caller's stack by. If the caller is a compiled frame then | |
454 // we pretend that the callee has no parameters so that the | |
455 // extension counts for the full amount of locals and not just | |
456 // locals-parms. This is because without a c2i adapter the parm | |
457 // area as created by the compiled frame will not be usable by | |
458 // the interpreter. (Depending on the calling convention there | |
459 // may not even be enough space). | |
460 | |
461 // QQQ I'd rather see this pushed down into last_frame_adjust | |
462 // and have it take the sender (aka caller). | |
463 | |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
464 if (deopt_sender.is_compiled_frame() || caller_was_method_handle) { |
0 | 465 caller_adjustment = last_frame_adjust(0, callee_locals); |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
466 } else if (callee_locals > callee_parameters) { |
0 | 467 // The caller frame may need extending to accommodate |
468 // non-parameter locals of the first unpacked interpreted frame. | |
469 // Compute that adjustment. | |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
470 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); |
0 | 471 } |
472 | |
473 // If the sender is deoptimized the we must retrieve the address of the handler | |
474 // since the frame will "magically" show the original pc before the deopt | |
475 // and we'd undo the deopt. | |
476 | |
477 frame_pcs[0] = deopt_sender.raw_pc(); | |
478 | |
1692 | 479 #ifndef SHARK |
0 | 480 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc"); |
1692 | 481 #endif // SHARK |
0 | 482 |
483 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, | |
484 caller_adjustment * BytesPerWord, | |
4042
b20d64f83668
7090904: JSR 292: JRuby junit test crashes in PSScavengeRootsClosure::do_oop
twisti
parents:
3931
diff
changeset
|
485 caller_was_method_handle ? 0 : callee_parameters, |
0 | 486 number_of_frames, |
487 frame_sizes, | |
488 frame_pcs, | |
489 return_type); | |
3931
5432047c7db7
7087445: Improve platform independence of JSR292 shared code
bdelsart
parents:
3369
diff
changeset
|
490 // On some platforms, we need a way to pass some platform dependent |
5432047c7db7
7087445: Improve platform independence of JSR292 shared code
bdelsart
parents:
3369
diff
changeset
|
491 // information to the unpacking code so the skeletal frames come out |
5432047c7db7
7087445: Improve platform independence of JSR292 shared code
bdelsart
parents:
3369
diff
changeset
|
492 // correct (initial fp value, unextended sp, ...) |
5432047c7db7
7087445: Improve platform independence of JSR292 shared code
bdelsart
parents:
3369
diff
changeset
|
493 info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info()); |
0 | 494 |
495 if (array->frames() > 1) { | |
496 if (VerifyStack && TraceDeoptimization) { | |
497 tty->print_cr("Deoptimizing method containing inlining"); | |
498 } | |
499 } | |
500 | |
501 array->set_unroll_block(info); | |
502 return info; | |
503 } | |
504 | |
// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception (the exception path passes NULL and
  // the array is still installed as the thread's vframe_array_head)
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock: the just-unpacked array is stashed as
  // vframe_array_last and the one from the prior deoptimization is freed now
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resource created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_nmethod(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
      thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}
555 | |
556 | |
// Unpack the skeletal interpreter frames described by the thread's current
// vframeArray onto the stack, clean up the per-thread deopt bookkeeping, and
// return the BasicType of the value being returned by the root frame (forced
// to T_OBJECT when an exception is pending so the deopt_blob preserves the
// exception oop).
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
  }
#endif

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    // Walk from the youngest (index 0) to the oldest unpacked frame.
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual ||
          cur_code == Bytecodes::_invokespecial ||
          cur_code == Bytecodes::_invokestatic  ||
          cur_code == Bytecodes::_invokeinterface) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        Symbol* signature = invoke.signature();
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (cur_code != Bytecodes::_invokestatic) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
        /* SPARC */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
        /* x86 */
        (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
        (try_next_mask &&
         (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                top_frame_expression_stack_adjustment))) ||
        (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
        (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
         (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
        )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END
731 | |
732 | |
// Deoptimize, across all threads, the frames that depend on nmethods which
// have been marked for deoptimization. Always returns 0.
int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}
737 | |
738 | |
739 #ifdef COMPILER2 | |
// Reallocate on the Java heap every scalar-replaced object described in
// 'objects' (instances, type arrays and object arrays) and record each new
// oop in its ObjectValue so the fields can be restored later. A pending
// exception on the thread is saved and restored around the allocations so
// they do not clobber it; an allocation failure propagates via CHECK_ and
// returns false to the caller. Returns true on success.
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  // Save (and clear) any exception already pending on the thread; the
  // allocations below use the TRAPS machinery and must start clean.
  Handle pending_exception(thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    oop obj = NULL;

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      obj = ik->allocate_instance(CHECK_(false));
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      // field_size() counts debug-info value slots; divide by the slots
      // per element (type2size) to recover the array length.
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, CHECK_(false));
    } else if (k->oop_is_objArray()) {
      objArrayKlass* ak = objArrayKlass::cast(k());
      obj = ak->allocate(sv->field_size(), CHECK_(false));
    }

    assert(obj != NULL, "allocation failed");
    assert(sv->value().is_null(), "redundant reallocation");
    sv->set_value(obj);
  }

  // Reinstate the exception that was pending on entry, if any.
  if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return true;
}
777 | |
778 // This assumes that the fields are stored in ObjectValue in the same order | |
779 // they are yielded by do_nonstatic_fields. | |
780 class FieldReassigner: public FieldClosure { | |
781 frame* _fr; | |
782 RegisterMap* _reg_map; | |
783 ObjectValue* _sv; | |
784 instanceKlass* _ik; | |
785 oop _obj; | |
786 | |
787 int _i; | |
788 public: | |
789 FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) : | |
790 _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {} | |
791 | |
792 int i() const { return _i; } | |
793 | |
794 | |
795 void do_field(fieldDescriptor* fd) { | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
796 intptr_t val; |
0 | 797 StackValue* value = |
798 StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i())); | |
799 int offset = fd->offset(); | |
800 switch (fd->field_type()) { | |
801 case T_OBJECT: case T_ARRAY: | |
802 assert(value->type() == T_OBJECT, "Agreement."); | |
803 _obj->obj_field_put(offset, value->get_obj()()); | |
804 break; | |
805 | |
806 case T_LONG: case T_DOUBLE: { | |
807 assert(value->type() == T_INT, "Agreement."); | |
808 StackValue* low = | |
809 StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i)); | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
810 #ifdef _LP64 |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
811 jlong res = (jlong)low->get_int(); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
812 #else |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
813 #ifdef SPARC |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
814 // For SPARC we have to swap high and low words. |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
815 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
816 #else |
0 | 817 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
818 #endif //SPARC |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
819 #endif |
0 | 820 _obj->long_field_put(offset, res); |
821 break; | |
822 } | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
823 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. |
0 | 824 case T_INT: case T_FLOAT: // 4 bytes. |
825 assert(value->type() == T_INT, "Agreement."); | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
826 val = value->get_int(); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
827 _obj->int_field_put(offset, (jint)*((jint*)&val)); |
0 | 828 break; |
829 | |
830 case T_SHORT: case T_CHAR: // 2 bytes | |
831 assert(value->type() == T_INT, "Agreement."); | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
832 val = value->get_int(); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
833 _obj->short_field_put(offset, (jshort)*((jint*)&val)); |
0 | 834 break; |
835 | |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
836 case T_BOOLEAN: case T_BYTE: // 1 byte |
0 | 837 assert(value->type() == T_INT, "Agreement."); |
44
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
838 val = value->get_int(); |
52fed2ec0afb
6667620: (Escape Analysis) fix deoptimization for scalar replaced objects
kvn
parents:
0
diff
changeset
|
839 _obj->bool_field_put(offset, (jboolean)*((jint*)&val)); |
0 | 840 break; |
841 | |
842 default: | |
843 ShouldNotReachHere(); | |
844 } | |
845 _i++; | |
846 } | |
847 }; | |
848 | |
// restore elements of an eliminated type array: materialize each element
// value from the compiled frame's debug info and store it into the
// reallocated typeArrayOop. Long/double elements occupy two 32-bit value
// slots, so a second slot is consumed (++i) for them; 'index' tracks the
// destination element independently of the slot cursor 'i'.
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      // Second half of the 64-bit value lives in the next slot.
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->int_at_put(index, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}
900 | |
901 | |
902 // restore fields of an eliminated object array | |
903 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) { | |
904 for (int i = 0; i < sv->field_size(); i++) { | |
905 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); | |
906 assert(value->type() == T_OBJECT, "object element expected"); | |
907 obj->obj_at_put(i, value->get_obj()()); | |
908 } | |
909 } | |
910 | |
911 | |
// restore fields of all eliminated objects and arrays: for each reallocated
// object recorded in 'objects', dispatch on its klass kind and refill it —
// instances via a FieldReassigner closure over the non-static fields,
// arrays via the element-reassignment helpers above.
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    // The oop was installed by realloc_objects(); it must exist here.
    Handle obj = sv->value();
    assert(obj.not_null(), "reallocation was missed");

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      FieldReassigner reassign(fr, reg_map, sv, obj());
      ik->do_nonstatic_fields(&reassign);
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->oop_is_objArray()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}
932 | |
933 | |
// Relock objects for which synchronization was eliminated by the compiler.
// After eliminated objects have been reallocated, each eliminated monitor
// must be re-entered so the replacement interpreter frames see the objects
// properly locked.
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      // The lock owner must have been materialized before relocking.
      assert(mon_info->owner() != NULL, "reallocation was missed");
      Handle obj = Handle(mon_info->owner());
      markOop mark = obj->mark();
      if (UseBiasedLocking && mark->has_bias_pattern()) {
        // New allocated objects may have the mark set to anonymously biased.
        // Also the deoptimized method may have called methods with synchronization
        // where the thread-local object is bias locked to the current thread.
        assert(mark->is_biased_anonymously() ||
               mark->biased_locker() == thread, "should be locked to current thread");
        // Reset mark word to unbiased prototype (preserving the object's age)
        // so the slow_enter below acquires a plain stack lock.
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        obj->set_mark(unbiased_prototype);
      }
      BasicLock* lock = mon_info->lock();
      // Acquire the monitor via the runtime slow path.
      ObjectSynchronizer::slow_enter(obj, lock, thread);
    }
    // Every monitor (eliminated or not) must end up locked at this point.
    assert(mon_info->owner()->is_locked(), "object must be locked now");
  }
}
958 | |
959 | |
#ifndef PRODUCT
// print information about reallocated objects
// Debug-only helper: dumps address, type and size of each object that was
// reallocated during deoptimization; with -XX:+Verbose also prints contents.
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();

    tty->print(" object <" INTPTR_FORMAT "> of type ", sv->value()());
    k->as_klassOop()->print_value();
    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    tty->cr();

    if (Verbose) {
      // Full field-by-field dump of the reallocated object.
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
981 #endif // COMPILER2 | |
982 | |
// Build the C-heap-allocated vframeArray that describes the interpreter
// frames which will replace the compiled frame 'fr'. 'chunk' holds the
// compiled vframes for the frame being deoptimized, innermost first.
vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {

#ifndef PRODUCT
  if (TraceDeoptimization) {
    // Diagnostic dump of the frame being packed and its virtual frames.
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
    fr.print_on(tty);
    tty->print_cr(" Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print(" %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        // Pseudo-bci used for the monitor-enter at a synchronized method entry.
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");
  Events::log("# vframes = %d", (intptr_t)chunk->length());

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr(" Created vframeArray " INTPTR_FORMAT, array);
  }
#endif // PRODUCT

  return array;
}
1040 | |
1041 | |
1042 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) { | |
1043 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); | |
1044 for (int i = 0; i < monitors->length(); i++) { | |
1045 MonitorInfo* mon_info = monitors->at(i); | |
818
b109e761e927
6837472: com/sun/jdi/MonitorFrameInfo.java fails with AggressiveOpts in 6u14
kvn
parents:
196
diff
changeset
|
1046 if (!mon_info->eliminated() && mon_info->owner() != NULL) { |
0 | 1047 objects_to_revoke->append(Handle(mon_info->owner())); |
1048 } | |
1049 } | |
1050 } | |
1051 | |
1052 | |
// Revoke the biases of all monitors held by compiled frame 'fr' on 'thread'
// so the locks can be migrated into the interpreter frames built by deopt.
// 'map' may be NULL or lack update info; in that case the stack is re-walked.
void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    // The stream's register map now reflects the frame we stopped at.
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes (inlined frames first, then top).
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  // Use the safepoint variant when already at a safepoint; otherwise take
  // the normal revocation path.
  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}
1090 | |
1091 | |
// Revoke biases of monitors in every frame, on every Java thread, that is
// currently executing inside code blob 'cb'. Must be called at a safepoint
// because it walks other threads' stacks.
void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        // Only frames whose pc lies within the blob are affected.
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}
1120 | |
1121 | |
// Mark one compiled frame for deoptimization. The frame is only patched
// here; the actual unpacking to interpreter frames happens when execution
// returns to it.
void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  // Record this deopt in the global statistics histogram.
  gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);

  EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());

  // Patch the nmethod so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}
1133 | |
1134 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) { | |
1135 // Deoptimize only if the frame comes from compile code. | |
1136 // Do not deoptimize the frame which is already patched | |
1137 // during the execution of the loops below. | |
1138 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) { | |
1139 return; | |
1140 } | |
1141 ResourceMark rm; | |
1142 DeoptimizationMarker dm; | |
1143 if (UseBiasedLocking) { | |
1144 revoke_biases_of_monitors(thread, fr, map); | |
1145 } | |
1146 deoptimize_single_frame(thread, fr); | |
1147 | |
1148 } | |
1149 | |
1150 | |
// Locate the frame identified by 'id' on the target thread's stack and
// deoptimize it. Safe only when running in the target thread itself or at
// a safepoint, since another thread's stack is crawled.
void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id) {
  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "can only deoptimize other thread at a safepoint");
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  // Walk outward until the frame with the matching id is found.
  // NOTE(review): presumably 'id' always refers to a live frame on this
  // thread's stack; there is no bounds check if it does not.
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map);
}
1162 | |
1163 | |
1905
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1164 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) { |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1165 if (thread == Thread::current()) { |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1166 Deoptimization::deoptimize_frame_internal(thread, id); |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1167 } else { |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1168 VM_DeoptimizeFrame deopt(thread, id); |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1169 VMThread::execute(&deopt); |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1170 } |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1171 } |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1172 |
ce6848d0666d
6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
never
parents:
1814
diff
changeset
|
1173 |
// JVMTI PopFrame support
// Leaf runtime entry: save 'bytes_to_save' bytes of outgoing arguments
// starting at 'start_address' into thread-local storage so they can be
// restored after the frame is popped.
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END
1180 | |
1181 | |
#if defined(COMPILER2) || defined(SHARK)
// Resolve the constant-pool entry at 'index' that caused an unloaded-class
// uncommon trap. Handles three cases: an unresolved klass entry, a class
// name symbol, and a method signature whose object types are each resolved.
// May throw (TRAPS); the non-TRAPS overload below swallows the exception.
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  // in case of an unresolved klass entry, load the class.
  if (constant_pool->tag_at(index).is_unresolved_klass()) {
    klassOop tk = constant_pool->klass_at(index, CHECK);
    return;
  }

  // Anything other than a symbol needs no loading here.
  if (!constant_pool->tag_at(index).is_symbol()) return;

  Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
  Symbol* symbol = constant_pool->symbol_at(index);

  // class name?
  if (symbol->byte_at(0) != '(') {
    Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
    return;
  }

  // then it must be a signature!
  // Resolve every object type mentioned in the signature.
  ResourceMark rm(THREAD);
  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
    if (ss.is_object()) {
      Symbol* class_name = ss.as_symbol(CHECK);
      Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
    }
  }
}
1212 | |
1213 | |
// Non-TRAPS wrapper: attempt the resolution and deliberately discard any
// exception raised during class loading.
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  EXCEPTION_MARK;
  load_class_by_index(constant_pool, index, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Exception happened during classloading. We ignore the exception here, since it
    // is going to be rethrown since the current activation is going to be deoptimized and
    // the interpreter will re-execute the bytecode.
    CLEAR_PENDING_EXCEPTION;
  }
}
1224 | |
1225 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) { | |
1226 HandleMark hm; | |
1227 | |
1228 // uncommon_trap() is called at the beginning of the uncommon trap | |
1229 // handler. Note this fact before we start generating temporary frames | |
1230 // that can confuse an asynchronous stack walker. This counter is | |
1231 // decremented at the end of unpack_frames(). | |
1232 thread->inc_in_deopt_handler(); | |
1233 | |
1234 // We need to update the map if we have biased locking. | |
1235 RegisterMap reg_map(thread, UseBiasedLocking); | |
1236 frame stub_frame = thread->last_frame(); | |
1237 frame fr = stub_frame.sender(®_map); | |
1238 // Make sure the calling nmethod is not getting deoptimized and removed | |
1239 // before we are done with it. | |
1240 nmethodLocker nl(fr.pc()); | |
1241 | |
1242 { | |
1243 ResourceMark rm; | |
1244 | |
1245 // Revoke biases of any monitors in the frame to ensure we can migrate them | |
1246 revoke_biases_of_monitors(thread, fr, ®_map); | |
1247 | |
1248 DeoptReason reason = trap_request_reason(trap_request); | |
1249 DeoptAction action = trap_request_action(trap_request); | |
1250 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1 | |
1251 | |
1252 Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request); | |
1253 vframe* vf = vframe::new_vframe(&fr, ®_map, thread); | |
1254 compiledVFrame* cvf = compiledVFrame::cast(vf); | |
1255 | |
1256 nmethod* nm = cvf->code(); | |
1257 | |
1258 ScopeDesc* trap_scope = cvf->scope(); | |
1259 methodHandle trap_method = trap_scope->method(); | |
1260 int trap_bci = trap_scope->bci(); | |
2142 | 1261 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci); |
0 | 1262 |
1263 // Record this event in the histogram. | |
1264 gather_statistics(reason, action, trap_bc); | |
1265 | |
1266 // Ensure that we can record deopt. history: | |
1267 bool create_if_missing = ProfileTraps; | |
1268 | |
1269 methodDataHandle trap_mdo | |
1270 (THREAD, get_method_data(thread, trap_method, create_if_missing)); | |
1271 | |
1272 // Print a bunch of diagnostics, if requested. | |
1273 if (TraceDeoptimization || LogCompilation) { | |
1274 ResourceMark rm; | |
1275 ttyLocker ttyl; | |
1276 char buf[100]; | |
1277 if (xtty != NULL) { | |
1278 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s", | |
1279 os::current_thread_id(), | |
1280 format_trap_request(buf, sizeof(buf), trap_request)); | |
1281 nm->log_identity(xtty); | |
1282 } | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2142
diff
changeset
|
1283 Symbol* class_name = NULL; |
0 | 1284 bool unresolved = false; |
1285 if (unloaded_class_index >= 0) { | |
1286 constantPoolHandle constants (THREAD, trap_method->constants()); | |
1287 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2142
diff
changeset
|
1288 class_name = constants->klass_name_at(unloaded_class_index); |
0 | 1289 unresolved = true; |
1290 if (xtty != NULL) | |
1291 xtty->print(" unresolved='1'"); | |
1292 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2142
diff
changeset
|
1293 class_name = constants->symbol_at(unloaded_class_index); |
0 | 1294 } |
1295 if (xtty != NULL) | |
1296 xtty->name(class_name); | |
1297 } | |
1298 if (xtty != NULL && trap_mdo.not_null()) { | |
1299 // Dump the relevant MDO state. | |
1300 // This is the deopt count for the current reason, any previous | |
1301 // reasons or recompiles seen at this point. | |
1302 int dcnt = trap_mdo->trap_count(reason); | |
1303 if (dcnt != 0) | |
1304 xtty->print(" count='%d'", dcnt); | |
1305 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); | |
1306 int dos = (pdata == NULL)? 0: pdata->trap_state(); | |
1307 if (dos != 0) { | |
1308 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); | |
1309 if (trap_state_is_recompiled(dos)) { | |
1310 int recnt2 = trap_mdo->overflow_recompile_count(); | |
1311 if (recnt2 != 0) | |
1312 xtty->print(" recompiles2='%d'", recnt2); | |
1313 } | |
1314 } | |
1315 } | |
1316 if (xtty != NULL) { | |
1317 xtty->stamp(); | |
1318 xtty->end_head(); | |
1319 } | |
1320 if (TraceDeoptimization) { // make noise on the tty | |
1321 tty->print("Uncommon trap occurred in"); | |
1322 nm->method()->print_short_name(tty); | |
1323 tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d", | |
1324 fr.pc(), | |
1325 (int) os::current_thread_id(), | |
1326 trap_reason_name(reason), | |
1327 trap_action_name(action), | |
1328 unloaded_class_index); | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2142
diff
changeset
|
1329 if (class_name != NULL) { |
0 | 1330 tty->print(unresolved ? " unresolved class: " : " symbol: "); |
1331 class_name->print_symbol_on(tty); | |
1332 } | |
1333 tty->cr(); | |
1334 } | |
1335 if (xtty != NULL) { | |
1336 // Log the precise location of the trap. | |
1337 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { | |
1338 xtty->begin_elem("jvms bci='%d'", sd->bci()); | |
1339 xtty->method(sd->method()); | |
1340 xtty->end_elem(); | |
1341 if (sd->is_top()) break; | |
1342 } | |
1343 xtty->tail("uncommon_trap"); | |
1344 } | |
1345 } | |
1346 // (End diagnostic printout.) | |
1347 | |
1348 // Load class if necessary | |
1349 if (unloaded_class_index >= 0) { | |
1350 constantPoolHandle constants(THREAD, trap_method->constants()); | |
1351 load_class_by_index(constants, unloaded_class_index); | |
1352 } | |
1353 | |
1354 // Flush the nmethod if necessary and desirable. | |
1355 // | |
1356 // We need to avoid situations where we are re-flushing the nmethod | |
1357 // because of a hot deoptimization site. Repeated flushes at the same | |
1358 // point need to be detected by the compiler and avoided. If the compiler | |
1359 // cannot avoid them (or has a bug and "refuses" to avoid them), this | |
1360 // module must take measures to avoid an infinite cycle of recompilation | |
1361 // and deoptimization. There are several such measures: | |
1362 // | |
1363 // 1. If a recompilation is ordered a second time at some site X | |
1364 // and for the same reason R, the action is adjusted to 'reinterpret', | |
1365 // to give the interpreter time to exercise the method more thoroughly. | |
1366 // If this happens, the method's overflow_recompile_count is incremented. | |
1367 // | |
1368 // 2. If the compiler fails to reduce the deoptimization rate, then | |
1369 // the method's overflow_recompile_count will begin to exceed the set | |
1370 // limit PerBytecodeRecompilationCutoff. If this happens, the action | |
1371 // is adjusted to 'make_not_compilable', and the method is abandoned | |
1372 // to the interpreter. This is a performance hit for hot methods, | |
1373 // but is better than a disastrous infinite cycle of recompilations. | |
1374 // (Actually, only the method containing the site X is abandoned.) | |
1375 // | |
1376 // 3. In parallel with the previous measures, if the total number of | |
1377 // recompilations of a method exceeds the much larger set limit | |
1378 // PerMethodRecompilationCutoff, the method is abandoned. | |
1379 // This should only happen if the method is very large and has | |
1380 // many "lukewarm" deoptimizations. The code which enforces this | |
1381 // limit is elsewhere (class nmethod, class methodOopDesc). | |
1382 // | |
1383 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance | |
1384 // to recompile at each bytecode independently of the per-BCI cutoff. | |
1385 // | |
1386 // The decision to update code is up to the compiler, and is encoded | |
1387 // in the Action_xxx code. If the compiler requests Action_none | |
1388 // no trap state is changed, no compiled code is changed, and the | |
1389 // computation suffers along in the interpreter. | |
1390 // | |
1391 // The other action codes specify various tactics for decompilation | |
1392 // and recompilation. Action_maybe_recompile is the loosest, and | |
1393 // allows the compiled code to stay around until enough traps are seen, | |
1394 // and until the compiler gets around to recompiling the trapping method. | |
1395 // | |
1396 // The other actions cause immediate removal of the present code. | |
1397 | |
1398 bool update_trap_state = true; | |
1399 bool make_not_entrant = false; | |
1400 bool make_not_compilable = false; | |
1783 | 1401 bool reprofile = false; |
0 | 1402 switch (action) { |
1403 case Action_none: | |
1404 // Keep the old code. | |
1405 update_trap_state = false; | |
1406 break; | |
1407 case Action_maybe_recompile: | |
1408 // Do not need to invalidate the present code, but we can | |
1409 // initiate another | |
1410 // Start compiler without (necessarily) invalidating the nmethod. | |
1411 // The system will tolerate the old code, but new code should be | |
1412 // generated when possible. | |
1413 break; | |
1414 case Action_reinterpret: | |
1415 // Go back into the interpreter for a while, and then consider | |
1416 // recompiling form scratch. | |
1417 make_not_entrant = true; | |
1418 // Reset invocation counter for outer most method. | |
1419 // This will allow the interpreter to exercise the bytecodes | |
1420 // for a while before recompiling. | |
1421 // By contrast, Action_make_not_entrant is immediate. | |
1422 // | |
1423 // Note that the compiler will track null_check, null_assert, | |
1424 // range_check, and class_check events and log them as if they | |
1425 // had been traps taken from compiled code. This will update | |
1426 // the MDO trap history so that the next compilation will | |
1427 // properly detect hot trap sites. | |
1783 | 1428 reprofile = true; |
0 | 1429 break; |
1430 case Action_make_not_entrant: | |
1431 // Request immediate recompilation, and get rid of the old code. | |
1432 // Make them not entrant, so next time they are called they get | |
1433 // recompiled. Unloaded classes are loaded now so recompile before next | |
1434 // time they are called. Same for uninitialized. The interpreter will | |
1435 // link the missing class, if any. | |
1436 make_not_entrant = true; | |
1437 break; | |
1438 case Action_make_not_compilable: | |
1439 // Give up on compiling this method at all. | |
1440 make_not_entrant = true; | |
1441 make_not_compilable = true; | |
1442 break; | |
1443 default: | |
1444 ShouldNotReachHere(); | |
1445 } | |
1446 | |
1447 // Setting +ProfileTraps fixes the following, on all platforms: | |
1448 // 4852688: ProfileInterpreter is off by default for ia64. The result is | |
1449 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the | |
1450 // recompile relies on a methodDataOop to record heroic opt failures. | |
1451 | |
1452 // Whether the interpreter is producing MDO data or not, we also need | |
1453 // to use the MDO to detect hot deoptimization points and control | |
1454 // aggressive optimization. | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1455 bool inc_recompile_count = false; |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1456 ProfileData* pdata = NULL; |
0 | 1457 if (ProfileTraps && update_trap_state && trap_mdo.not_null()) { |
1458 assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity"); | |
1459 uint this_trap_count = 0; | |
1460 bool maybe_prior_trap = false; | |
1461 bool maybe_prior_recompile = false; | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1462 pdata = query_update_method_data(trap_mdo, trap_bci, reason, |
0 | 1463 //outputs: |
1464 this_trap_count, | |
1465 maybe_prior_trap, | |
1466 maybe_prior_recompile); | |
1467 // Because the interpreter also counts null, div0, range, and class | |
1468 // checks, these traps from compiled code are double-counted. | |
1469 // This is harmless; it just means that the PerXTrapLimit values | |
1470 // are in effect a little smaller than they look. | |
1471 | |
1472 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); | |
1473 if (per_bc_reason != Reason_none) { | |
1474 // Now take action based on the partially known per-BCI history. | |
1475 if (maybe_prior_trap | |
1476 && this_trap_count >= (uint)PerBytecodeTrapLimit) { | |
1477 // If there are too many traps at this BCI, force a recompile. | |
1478 // This will allow the compiler to see the limit overflow, and | |
1479 // take corrective action, if possible. The compiler generally | |
1480 // does not use the exact PerBytecodeTrapLimit value, but instead | |
1481 // changes its tactics if it sees any traps at all. This provides | |
1482 // a little hysteresis, delaying a recompile until a trap happens | |
1483 // several times. | |
1484 // | |
1485 // Actually, since there is only one bit of counter per BCI, | |
1486 // the possible per-BCI counts are {0,1,(per-method count)}. | |
1487 // This produces accurate results if in fact there is only | |
1488 // one hot trap site, but begins to get fuzzy if there are | |
1489 // many sites. For example, if there are ten sites each | |
1490 // trapping two or more times, they each get the blame for | |
1491 // all of their traps. | |
1492 make_not_entrant = true; | |
1493 } | |
1494 | |
1495 // Detect repeated recompilation at the same BCI, and enforce a limit. | |
1496 if (make_not_entrant && maybe_prior_recompile) { | |
1497 // More than one recompile at this point. | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1498 inc_recompile_count = maybe_prior_trap; |
0 | 1499 } |
1500 } else { | |
1501 // For reasons which are not recorded per-bytecode, we simply | |
1502 // force recompiles unconditionally. | |
1503 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) | |
1504 make_not_entrant = true; | |
1505 } | |
1506 | |
1507 // Go back to the compiler if there are too many traps in this method. | |
1508 if (this_trap_count >= (uint)PerMethodTrapLimit) { | |
1509 // If there are too many traps in this method, force a recompile. | |
1510 // This will allow the compiler to see the limit overflow, and | |
1511 // take corrective action, if possible. | |
1512 // (This condition is an unlikely backstop only, because the | |
1513 // PerBytecodeTrapLimit is more likely to take effect first, | |
1514 // if it is applicable.) | |
1515 make_not_entrant = true; | |
1516 } | |
1517 | |
1518 // Here's more hysteresis: If there has been a recompile at | |
1519 // this trap point already, run the method in the interpreter | |
1520 // for a while to exercise it more thoroughly. | |
1521 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { | |
1783 | 1522 reprofile = true; |
0 | 1523 } |
1524 | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1525 } |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1526 |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1527 // Take requested actions on the method: |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1528 |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1529 // Recompile |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1530 if (make_not_entrant) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1531 if (!nm->make_not_entrant()) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1532 return; // the call did not change nmethod's state |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1533 } |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1534 |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1535 if (pdata != NULL) { |
0 | 1536 // Record the recompilation event, if any. |
1537 int tstate0 = pdata->trap_state(); | |
1538 int tstate1 = trap_state_set_recompiled(tstate0, true); | |
1539 if (tstate1 != tstate0) | |
1540 pdata->set_trap_state(tstate1); | |
1541 } | |
1542 } | |
1543 | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1544 if (inc_recompile_count) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1545 trap_mdo->inc_overflow_recompile_count(); |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1546 if ((uint)trap_mdo->overflow_recompile_count() > |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1547 (uint)PerBytecodeRecompilationCutoff) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1548 // Give up on the method containing the bad BCI. |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1549 if (trap_method() == nm->method()) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1550 make_not_compilable = true; |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1551 } else { |
1783 | 1552 trap_method->set_not_compilable(CompLevel_full_optimization); |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1553 // But give grace to the enclosing nm->method(). |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1554 } |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1555 } |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1556 } |
0 | 1557 |
1783 | 1558 // Reprofile |
1559 if (reprofile) { | |
1560 CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method()); | |
0 | 1561 } |
1562 | |
1563 // Give up compiling | |
1783 | 1564 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) { |
0 | 1565 assert(make_not_entrant, "consistent"); |
1783 | 1566 nm->method()->set_not_compilable(CompLevel_full_optimization); |
0 | 1567 } |
1568 | |
1569 } // Free marked resources | |
1570 | |
1571 } | |
1572 JRT_END | |
1573 | |
// Look up (and optionally create) the methodDataOop holding profile and
// trap history for method m.  Returns NULL when no MDO exists and one
// could not be built (e.g. the build attempt ran out of memory).
methodDataOop
Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
                                bool create_if_missing) {
  // Bind THREAD so the exception macros below (HAS_PENDING_EXCEPTION,
  // PENDING_EXCEPTION, CLEAR_PENDING_EXCEPTION) operate on this thread.
  Thread* THREAD = thread;
  methodDataOop mdo = m()->method_data();
  if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
    // Build an MDO.  Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    methodOopDesc::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    // Re-read: on success, build_interpreter_method_data installed the MDO.
    mdo = m()->method_data();
  }
  return mdo;
}
1591 | |
1592 ProfileData* | |
1593 Deoptimization::query_update_method_data(methodDataHandle trap_mdo, | |
1594 int trap_bci, | |
1595 Deoptimization::DeoptReason reason, | |
1596 //outputs: | |
1597 uint& ret_this_trap_count, | |
1598 bool& ret_maybe_prior_trap, | |
1599 bool& ret_maybe_prior_recompile) { | |
1600 uint prior_trap_count = trap_mdo->trap_count(reason); | |
1601 uint this_trap_count = trap_mdo->inc_trap_count(reason); | |
1602 | |
1603 // If the runtime cannot find a place to store trap history, | |
1604 // it is estimated based on the general condition of the method. | |
1605 // If the method has ever been recompiled, or has ever incurred | |
1606 // a trap with the present reason , then this BCI is assumed | |
1607 // (pessimistically) to be the culprit. | |
1608 bool maybe_prior_trap = (prior_trap_count != 0); | |
1609 bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0); | |
1610 ProfileData* pdata = NULL; | |
1611 | |
1612 | |
1613 // For reasons which are recorded per bytecode, we check per-BCI data. | |
1614 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); | |
1615 if (per_bc_reason != Reason_none) { | |
1616 // Find the profile data for this BCI. If there isn't one, | |
1617 // try to allocate one from the MDO's set of spares. | |
1618 // This will let us detect a repeated trap at this point. | |
1619 pdata = trap_mdo->allocate_bci_to_data(trap_bci); | |
1620 | |
1621 if (pdata != NULL) { | |
1622 // Query the trap state of this profile datum. | |
1623 int tstate0 = pdata->trap_state(); | |
1624 if (!trap_state_has_reason(tstate0, per_bc_reason)) | |
1625 maybe_prior_trap = false; | |
1626 if (!trap_state_is_recompiled(tstate0)) | |
1627 maybe_prior_recompile = false; | |
1628 | |
1629 // Update the trap state of this profile datum. | |
1630 int tstate1 = tstate0; | |
1631 // Record the reason. | |
1632 tstate1 = trap_state_add_reason(tstate1, per_bc_reason); | |
1633 // Store the updated state on the MDO, for next time. | |
1634 if (tstate1 != tstate0) | |
1635 pdata->set_trap_state(tstate1); | |
1636 } else { | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1637 if (LogCompilation && xtty != NULL) { |
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1638 ttyLocker ttyl; |
0 | 1639 // Missing MDP? Leave a small complaint in the log. |
1640 xtty->elem("missing_mdp bci='%d'", trap_bci); | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1641 } |
0 | 1642 } |
1643 } | |
1644 | |
1645 // Return results: | |
1646 ret_this_trap_count = this_trap_count; | |
1647 ret_maybe_prior_trap = maybe_prior_trap; | |
1648 ret_maybe_prior_recompile = maybe_prior_recompile; | |
1649 return pdata; | |
1650 } | |
1651 | |
1652 void | |
1653 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) { | |
1654 ResourceMark rm; | |
1655 // Ignored outputs: | |
1656 uint ignore_this_trap_count; | |
1657 bool ignore_maybe_prior_trap; | |
1658 bool ignore_maybe_prior_recompile; | |
1659 query_update_method_data(trap_mdo, trap_bci, | |
1660 (DeoptReason)reason, | |
1661 ignore_this_trap_count, | |
1662 ignore_maybe_prior_trap, | |
1663 ignore_maybe_prior_recompile); | |
1664 } | |
1665 | |
// Handle an uncommon trap request: process and record the trap (which may
// safepoint), then compute the UnrollBlock describing how to unpack the
// frame being deoptimized.
Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {

  // Still in Java no safepoints
  {
    // This enters VM and may safepoint
    uncommon_trap_inner(thread, trap_request);
  }
  return fetch_unroll_info_helper(thread);
}
1675 | |
// Local derived constants.
// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
// The lower bits of trap_state hold the recorded DeoptReason; the single
// remaining (highest) bit of trap_mask records whether a recompile has
// been issued for this trap site.
const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
1680 | |
1681 //---------------------------trap_state_reason--------------------------------- | |
1682 Deoptimization::DeoptReason | |
1683 Deoptimization::trap_state_reason(int trap_state) { | |
1684 // This assert provides the link between the width of DataLayout::trap_bits | |
1685 // and the encoding of "recorded" reasons. It ensures there are enough | |
1686 // bits to store all needed reasons in the per-BCI MDO profile. | |
1687 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); | |
1688 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); | |
1689 trap_state -= recompile_bit; | |
1690 if (trap_state == DS_REASON_MASK) { | |
1691 return Reason_many; | |
1692 } else { | |
1693 assert((int)Reason_none == 0, "state=0 => Reason_none"); | |
1694 return (DeoptReason)trap_state; | |
1695 } | |
1696 } | |
1697 //-------------------------trap_state_has_reason------------------------------- | |
1698 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { | |
1699 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); | |
1700 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); | |
1701 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); | |
1702 trap_state -= recompile_bit; | |
1703 if (trap_state == DS_REASON_MASK) { | |
1704 return -1; // true, unspecifically (bottom of state lattice) | |
1705 } else if (trap_state == reason) { | |
1706 return 1; // true, definitely | |
1707 } else if (trap_state == 0) { | |
1708 return 0; // false, definitely (top of state lattice) | |
1709 } else { | |
1710 return 0; // false, definitely | |
1711 } | |
1712 } | |
1713 //-------------------------trap_state_add_reason------------------------------- | |
1714 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { | |
1715 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); | |
1716 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); | |
1717 trap_state -= recompile_bit; | |
1718 if (trap_state == DS_REASON_MASK) { | |
1719 return trap_state + recompile_bit; // already at state lattice bottom | |
1720 } else if (trap_state == reason) { | |
1721 return trap_state + recompile_bit; // the condition is already true | |
1722 } else if (trap_state == 0) { | |
1723 return reason + recompile_bit; // no condition has yet been true | |
1724 } else { | |
1725 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom | |
1726 } | |
1727 } | |
//-----------------------trap_state_is_recompiled------------------------------
// Has a recompile already been recorded for this trap site?  (The flag
// occupies the single bit above the reason field of trap_state.)
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return (trap_state & DS_RECOMPILE_BIT) != 0;
}
1732 //-----------------------trap_state_set_recompiled----------------------------- | |
1733 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { | |
1734 if (z) return trap_state | DS_RECOMPILE_BIT; | |
1735 else return trap_state & ~DS_RECOMPILE_BIT; | |
1736 } | |
//---------------------------format_trap_state---------------------------------
// This is used for debugging and diagnostics, including hotspot.log output.
// Formats trap_state into buf as "<reason>[ recompiled]", or as "#<raw>"
// if the state does not round-trip through the decode/encode helpers.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  DeoptReason reason      = trap_state_reason(trap_state);
  bool        recomp_flag = trap_state_is_recompiled(trap_state);
  // Re-encode the state from its decoded components.
  int decoded_state = 0;
  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
    decoded_state = trap_state_add_reason(decoded_state, reason);
  if (recomp_flag)
    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  // If the state re-encodes properly, format it symbolically.
  // Because this routine is used for debugging and diagnostics,
  // be robust even if the state is a strange value.
  size_t len;
  if (decoded_state != trap_state) {
    // Random buggy state that doesn't decode??
    len = jio_snprintf(buf, buflen, "#%d", trap_state);
  } else {
    len = jio_snprintf(buf, buflen, "%s%s",
                       trap_reason_name(reason),
                       recomp_flag ? " recompiled" : "");
  }
  // Ensure NUL termination if the output was truncated.
  if (len >= buflen)
    buf[buflen-1] = '\0';
  return buf;
}
1765 | |
1766 | |
1767 //--------------------------------statics-------------------------------------- | |
1768 Deoptimization::DeoptAction Deoptimization::_unloaded_action | |
1769 = Deoptimization::Action_reinterpret; | |
1770 const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = { | |
1771 // Note: Keep this in sync. with enum DeoptReason. | |
1772 "none", | |
1773 "null_check", | |
1774 "null_assert", | |
1775 "range_check", | |
1776 "class_check", | |
1777 "array_check", | |
1778 "intrinsic", | |
1206
87684f1a88b5
6614597: Performance variability in jvm2008 xml.validation
kvn
parents:
1204
diff
changeset
|
1779 "bimorphic", |
0 | 1780 "unloaded", |
1781 "uninitialized", | |
1782 "unreached", | |
1783 "unhandled", | |
1784 "constraint", | |
1785 "div0_check", | |
1172 | 1786 "age", |
3345 | 1787 "predicate", |
1788 "loop_limit_check" | |
0 | 1789 }; |
// Printable names for DeoptAction values, consumed by trap_action_name()
// for logs and diagnostics.
const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
  // Note:  Keep this in sync. with enum DeoptAction.
  "none",
  "maybe_recompile",
  "reinterpret",
  "make_not_entrant",
  "make_not_compilable"
};
1798 | |
1799 const char* Deoptimization::trap_reason_name(int reason) { | |
1800 if (reason == Reason_many) return "many"; | |
1801 if ((uint)reason < Reason_LIMIT) | |
1802 return _trap_reason_name[reason]; | |
1803 static char buf[20]; | |
1804 sprintf(buf, "reason%d", reason); | |
1805 return buf; | |
1806 } | |
1807 const char* Deoptimization::trap_action_name(int action) { | |
1808 if ((uint)action < Action_LIMIT) | |
1809 return _trap_action_name[action]; | |
1810 static char buf[20]; | |
1811 sprintf(buf, "action%d", action); | |
1812 return buf; | |
1813 } | |
1814 | |
// This is used for debugging and diagnostics, including hotspot.log output.
// Formats a trap_request word as "reason='..' action='..'", with an extra
// "index='..'" attribute when the request carries an unloaded-class index.
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                int trap_request) {
  jint unloaded_class_index = trap_request_index(trap_request);
  const char* reason = trap_reason_name(trap_request_reason(trap_request));
  const char* action = trap_action_name(trap_request_action(trap_request));
  size_t len;
  if (unloaded_class_index < 0) {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
                       reason, action);
  } else {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
                       reason, action, unloaded_class_index);
  }
  // Ensure NUL termination if the output was truncated.
  if (len >= buflen)
    buf[buflen-1] = '\0';
  return buf;
}
1833 | |
// Histogram of deoptimization events.  Indexed by reason, then action
// (slot 0 of the action axis holds per-reason totals; [Reason_none][0][0]
// is the grand total), then a small set of bytecode cases.  See
// gather_statistics() for the packed counter encoding.
juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
    [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};
1839 | |
// Each histogram cell packs a bytecode in its low LSB_BITS bits and an
// event count in the remaining high bits (see gather_statistics).
enum {
  LSB_BITS = 8,
  LSB_MASK = right_n_bits(LSB_BITS)
};
1844 | |
// Record one deoptimization event in _deoptimization_hist.  Each
// (reason, action) row has BC_CASE_LIMIT packed counters; a counter's low
// LSB_BITS bits remember which bytecode it is devoted to and its high
// bits hold the event count.  The last slot doubles as a catch-all once
// the row overflows or when no bytecode is given.
// NOTE(review): counters are updated without synchronization — presumably
// tolerable for statistics; confirm before relying on exact counts.
void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  assert(action >= 0 && action < Action_LIMIT, "oob");
  _deoptimization_hist[Reason_none][0][0] += 1;  // total
  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  juint* cases = _deoptimization_hist[reason][1+action];
  juint* bc_counter_addr = NULL;
  juint  bc_counter = 0;
  // Look for an unused counter, or an exact match to this BC.
  if (bc != Bytecodes::_illegal) {
    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
      juint* counter_addr = &cases[bc_case];
      juint  counter = *counter_addr;
      if ((counter == 0 && bc_counter_addr == NULL)
          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
        // this counter is either free or is already devoted to this BC
        bc_counter_addr = counter_addr;
        // Merge the BC into the low bits (no-op when already devoted).
        bc_counter = counter | bc;
      }
    }
  }
  if (bc_counter_addr == NULL) {
    // Overflow, or no given bytecode.
    // Fall back to the last slot, keeping its BC field cleared.
    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  }
  // Bump the count field, which lives above the bytecode bits.
  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
}
1874 | |
// Total number of deoptimization events recorded (the grand-total cell
// of the histogram).
jint Deoptimization::total_deoptimization_count() {
  return _deoptimization_hist[Reason_none][0][0];
}
1878 | |
// Number of deoptimization events recorded for one particular reason.
jint Deoptimization::deoptimization_count(DeoptReason reason) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  return _deoptimization_hist[reason][0][0];
}
1883 | |
1884 void Deoptimization::print_statistics() { | |
1885 juint total = total_deoptimization_count(); | |
1886 juint account = total; | |
1887 if (total != 0) { | |
1888 ttyLocker ttyl; | |
1889 if (xtty != NULL) xtty->head("statistics type='deoptimization'"); | |
1890 tty->print_cr("Deoptimization traps recorded:"); | |
1891 #define PRINT_STAT_LINE(name, r) \ | |
1892 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); | |
1893 PRINT_STAT_LINE("total", total); | |
1894 // For each non-zero entry in the histogram, print the reason, | |
1895 // the action, and (if specifically known) the type of bytecode. | |
1896 for (int reason = 0; reason < Reason_LIMIT; reason++) { | |
1897 for (int action = 0; action < Action_LIMIT; action++) { | |
1898 juint* cases = _deoptimization_hist[reason][1+action]; | |
1899 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { | |
1900 juint counter = cases[bc_case]; | |
1901 if (counter != 0) { | |
1902 char name[1*K]; | |
1903 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK); | |
1904 if (bc_case == BC_CASE_LIMIT && (int)bc == 0) | |
1905 bc = Bytecodes::_illegal; | |
1906 sprintf(name, "%s/%s/%s", | |
1907 trap_reason_name(reason), | |
1908 trap_action_name(action), | |
1909 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other"); | |
1910 juint r = counter >> LSB_BITS; | |
1911 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total); | |
1912 account -= r; | |
1913 } | |
1914 } | |
1915 } | |
1916 } | |
1917 if (account != 0) { | |
1918 PRINT_STAT_LINE("unaccounted", account); | |
1919 } | |
1920 #undef PRINT_STAT_LINE | |
1921 if (xtty != NULL) xtty->tail("statistics"); | |
1922 } | |
1923 } | |
1692 | 1924 #else // COMPILER2 || SHARK |
0 | 1925 |
1926 | |
1927 // Stubs for C1 only system. | |
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;  // no trap-state tracking without COMPILER2/SHARK
}
1931 | |
const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";  // reason names are only maintained by COMPILER2/SHARK
}
1935 | |
void Deoptimization::print_statistics() {
  // no output -- the deopt histogram is only kept by COMPILER2/SHARK
}
1939 | |
void
Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  // no update -- trap history is only recorded by COMPILER2/SHARK
}
1944 | |
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;  // "false, definitely": no per-BCI state without COMPILER2/SHARK
}
1948 | |
void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update -- the deopt histogram is only kept by COMPILER2/SHARK
}
1953 | |
// Without COMPILER2/SHARK the state cannot be decoded symbolically;
// print the raw numeric value instead.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}
1959 | |
1692 | 1960 #endif // COMPILER2 || SHARK |