annotate src/share/vm/code/compiledIC.cpp @ 1604:b918d354830a

6960865: ldc of unloaded class throws an assert in ciTypeFlow
Summary: Support java_mirror for unloaded klasses, arrays as well as instances. Simplify ciTypeFlow by removing unused path.
Reviewed-by: kvn

author:   jrose
date:     Sat, 12 Jun 2010 22:53:43 -0700
parents:  e9ff18c4ace7
children: 2d26b0046e0d f95d63e2154a
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compiledIC.cpp.incl"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.
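
// An inline cache can be in one of three states:
//   clean       - the call site points at one of SharedRuntime's resolve
//                 stubs and no oop is cached
//   monomorphic - the call site dispatches to a single verified target,
//                 guarded by the cached oop (receiver klass, or a
//                 method/method-klass-holder for interpreted targets)
//   megamorphic - the call site dispatches through a vtable/itable stub
// State changes that are not MT-safe go through a transition stub in the
// InlineCacheBuffer (see is_in_transition_state() below).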

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void CompiledIC::set_cached_oop(oop cache) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
  assert (cache == NULL || cache != badOop, "invalid oop");

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }

  if (cache == NULL) cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // fix up the relocations
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
  return;
}
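
// Note that a cleared cache oop is stored as Universe::non_oop_word() rather
// than as a raw NULL, so a racing reader never sees a null in the oop slot;
// cached_oop() below undoes this encoding.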


oop CompiledIC::cached_oop() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // If we let the oop value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}


void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}
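
// While a transition stub is pending, the new (cached oop, destination) pair
// lives in the InlineCacheBuffer stub rather than at the call site itself,
// which is why cached_oop() and ic_destination() above consult the buffer
// when is_in_transition_state() is true.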


// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.


void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  instruction_address(), method->print_value_string(), entry);
  }

  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}
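
// Once megamorphic, the call dispatches through a vtable/itable stub and the
// cached oop is no longer needed for a receiver check; is_megamorphic()
// below therefore identifies the state from the destination alone.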


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_oop. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub)
  // can be used for calling directly to the verified entry point (vep), without using the
  // inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
          !is_monomorphic ||
          is_optimized() ||
          (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie nmethod)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}


void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      instruction_address(),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
    } else {
      set_ic_destination(info.entry());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    instruction_address(),
                    ((klassOop)info.cached_oop()())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e., no inline
// cache); static_bound: The call can be statically bound (i.e., no need to
// use an inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  info._is_optimized = is_optimized;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a methodOop to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry      = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry      = method()->get_c2i_unverified_entry();
    }
  }
}
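
// Caller-side sketch of the resolution flow (illustration only; the actual
// driver is SharedRuntime's call-site resolution code):
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         is_optimized, static_bound,
//                                         info, CHECK);
//   ic->set_to_monomorphic(info);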


inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  nmethod* tmp_nm = nm;
  return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}


CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}
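
// Construction sketch (illustration only; assumes the caller already holds
// CompiledIC_lock or is at a safepoint, per the rule at the top of the file):
//
//   RelocIterator iter(nm);                // walk the nmethod's relocations
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type) {
//       CompiledIC ic(iter.reloc());       // wraps the IC call site
//       if (ic.is_clean()) { /* ... */ }
//     }
//   }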


// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted, if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}


void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}
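
// Note the update order above: the stub (method holder and jump) is fully
// initialized before set_destination_mt_safe() redirects the call to it, so
// a thread racing through the call site never executes a half-built stub.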


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}
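
// Caller-side sketch (illustration only; mirrors the monomorphic IC flow):
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee_method, info);
//   csc->set(info);    // csc: the CompiledStaticCall at the call site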


void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}


void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}


void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif