comparison src/share/vm/opto/doCall.cpp @ 6275:957c266d8bc5

Merge with http://hg.openjdk.java.net/hsx/hsx24/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Tue, 21 Aug 2012 10:39:19 +0200
parents b72784e722ff
children 7f813940ac35
comparing 5891:fd8832ae511d with 6275:957c266d8bc5
@@ -57 +57 @@
     tty->cr();
   }
 }
 #endif
 
-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
+CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual,
                                        JVMState* jvms, bool allow_inline,
-                                       float prof_factor) {
+                                       float prof_factor, bool allow_intrinsics) {
   ciMethod*       caller   = jvms->method();
   int             bci      = jvms->bci();
   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
-  guarantee(call_method != NULL, "failed method resolution");
+  guarantee(callee != NULL, "failed method resolution");
 
   // Dtrace currently doesn't work unless all calls are vanilla
   if (env()->dtrace_method_probes()) {
     allow_inline = false;
   }
@@ -89 +89 @@
   CompileLog* log = this->log();
   if (log != NULL) {
     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
-                    log->identify(call_method), site_count, prof_factor);
+                    log->identify(callee), site_count, prof_factor);
     if (call_is_virtual)  log->print(" virtual='1'");
     if (allow_inline)     log->print(" inline='1'");
     if (receiver_count >= 0) {
       log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
       if (profile.has_receiver(1)) {
@@ -106 +106 @@
   // Special case the handling of certain common, profitable library
   // methods.  If these methods are replaced with specialized code,
   // then we return it as the inlined version of the call.
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
-  if (allow_inline) {
-    CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
+  if (allow_inline && allow_intrinsics) {
+    CallGenerator* cg = find_intrinsic(callee, call_is_virtual);
     if (cg != NULL)  return cg;
   }
 
   // Do method handle calls.
   // NOTE: This must happen before normal inlining logic below since
   // MethodHandle.invoke* are native methods which obviously don't
   // have bytecodes and so normal inlining fails.
-  if (call_method->is_method_handle_invoke()) {
-    if (bytecode != Bytecodes::_invokedynamic) {
-      GraphKit kit(jvms);
-      Node* method_handle = kit.argument(0);
-      return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile);
-    }
-    else {
-      return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile);
-    }
+  if (callee->is_method_handle_intrinsic()) {
+    return CallGenerator::for_method_handle_call(jvms, caller, callee);
   }
 
   // Do not inline strict fp into non-strict code, or the reverse
-  if (caller->is_strict() ^ call_method->is_strict()) {
+  if (caller->is_strict() ^ callee->is_strict()) {
     allow_inline = false;
   }
 
   // Attempt to inline...
   if (allow_inline) {
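
Note on the hunk above: with the new allow_intrinsics parameter, the dispatch policy inside call_generator() becomes a fixed priority chain: intrinsics first (they handle strict f.p. correctly), then the dedicated method handle generator (MethodHandle linkers have no bytecodes, so normal inlining can never apply), then ordinary inlining, then a generic call. A minimal standalone sketch of that chain; Site, Strategy, and choose() are hypothetical stand-ins for the real ciMethod/CallGenerator machinery, not HotSpot code:

    #include <cstdio>

    enum class Strategy { Intrinsic, MethodHandle, Inline, Virtual, Direct };

    struct Site {               // hypothetical condensation of the real inputs
      bool allow_inline;
      bool allow_intrinsics;    // the parameter this change introduces
      bool has_intrinsic;       // "find_intrinsic() returned non-NULL"
      bool is_mh_intrinsic;     // callee->is_method_handle_intrinsic()
      bool ok_to_inline;        // "the InlineTree approved the site"
      bool is_virtual;
    };

    static Strategy choose(const Site& s) {
      if (s.allow_inline && s.allow_intrinsics && s.has_intrinsic)
        return Strategy::Intrinsic;     // tried before the strict-fp check
      if (s.is_mh_intrinsic)
        return Strategy::MethodHandle;  // must precede normal inlining
      if (s.allow_inline && s.ok_to_inline)
        return Strategy::Inline;
      return s.is_virtual ? Strategy::Virtual : Strategy::Direct;
    }

    int main() {
      Site mh_site = {true, true, false, true, false, false};
      std::printf("%d\n", static_cast<int>(choose(mh_site)));  // prints 1 (MethodHandle)
    }
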
@@ -153 +146 @@
       // Note:  ilt is for the root of this parse, not the present call site.
       ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
     }
     WarmCallInfo scratch_ci;
     if (!UseOldInlining)
-      scratch_ci.init(jvms, call_method, profile, prof_factor);
-    WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
+      scratch_ci.init(jvms, callee, profile, prof_factor);
+    WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci);
     assert(ci != &scratch_ci, "do not let this pointer escape");
     bool allow_inline   = (ci != NULL && !ci->is_cold());
     bool require_inline = (allow_inline && ci->is_hot());
 
     if (allow_inline) {
-      CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
-      if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
+      CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
+      if (require_inline && cg != NULL && should_delay_inlining(callee, jvms)) {
         // Delay the inlining of this method to give us the
         // opportunity to perform some high level optimizations
         // first.
-        return CallGenerator::for_late_inline(call_method, cg);
+        return CallGenerator::for_late_inline(callee, cg);
       }
       if (cg == NULL) {
         // Fall through.
       } else if (require_inline || !InlineWarmCalls) {
         return cg;
       } else {
-        CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
+        CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor);
         return CallGenerator::for_warm_call(ci, cold_cg, cg);
       }
     }
   }
 
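The warm-call logic in this hunk picks between three outcomes: inline now (hot), inline later via for_late_inline (hot, but worth delaying until after the high-level optimizations), or emit a warm call that keeps a cold out-of-line generator as fallback. One way to model the deferral, as a sketch only; the real late-inline generator does more bookkeeping than this toy queue:

    #include <functional>
    #include <vector>

    using Generator = std::function<void()>;  // "expand this call site into IR"

    struct LateInlineQueue {
      std::vector<Generator> deferred;

      // for_late_inline analogue: wrap the real expansion in a generator
      // that merely queues it for later.
      Generator defer(Generator inline_gen) {
        return [this, inline_gen]() { deferred.push_back(inline_gen); };
      }

      // Run once the "high level optimizations" the comment mentions are done.
      void drain() {
        for (Generator& g : deferred) g();
        deferred.clear();
      }
    };
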
@@ -187 +180 @@
     ciMethod* receiver_method = NULL;
     if (have_major_receiver || profile.morphism() == 1 ||
         (profile.morphism() == 2 && UseBimorphicInlining)) {
       // receiver_method = profile.method();
       // Profiles do not suggest methods now.  Look it up in the major receiver.
-      receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
-                                                    profile.receiver(0));
+      receiver_method = callee->resolve_invoke(jvms->method()->holder(),
+                                               profile.receiver(0));
     }
     if (receiver_method != NULL) {
       // The single majority receiver sufficiently outweighs the minority.
       CallGenerator* hit_cg = this->call_generator(receiver_method,
@@ -199 +192 @@
       if (hit_cg != NULL) {
         // Look up second receiver.
         CallGenerator* next_hit_cg = NULL;
         ciMethod* next_receiver_method = NULL;
         if (profile.morphism() == 2 && UseBimorphicInlining) {
-          next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
-                                                             profile.receiver(1));
+          next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
+                                                        profile.receiver(1));
           if (next_receiver_method != NULL) {
             next_hit_cg = this->call_generator(next_receiver_method,
                                                vtable_index, !call_is_virtual, jvms,
                                                allow_inline, prof_factor);
@@ -222 +215 @@
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
             !too_many_traps(jvms->method(), jvms->bci(), reason)
            ) {
           // Generate uncommon trap for class check failure path
           // in case of monomorphic or bimorphic virtual call site.
-          miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
+          miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                                                      Deoptimization::Action_maybe_recompile);
         } else {
           // Generate virtual call for class check failure path
           // in case of polymorphic virtual call site.
-          miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
+          miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
         }
         if (miss_cg != NULL) {
           if (next_hit_cg != NULL) {
             NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
             // We don't need to record dependency on a receiver here and below.
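What this tactic emits is two guarded fast paths plus a miss path, where the miss path is an uncommon trap for monomorphic and bimorphic sites (deoptimize and recompile if the profile turns out to be wrong) and a real virtual dispatch for polymorphic ones. The emitted shape, modeled in plain C++ with hypothetical types:

    struct Obj { const void* klass; };  // stand-in for an oop's class word

    // k1/k2 are the profiled receiver classes; the return values stand in
    // for the inlined bodies chosen by hit_cg and next_hit_cg.
    int dispatch(Obj* o, const void* k1, const void* k2) {
      if (o->klass == k1) return 1;  // hit_cg: body for profile.receiver(0)
      if (o->klass == k2) return 2;  // next_hit_cg: body for profile.receiver(1)
      return -1;                     // miss_cg: uncommon trap or v-call
    }
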
@@ -250 +243 @@
   }
 
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_is_virtual) {
-    return CallGenerator::for_virtual_call(call_method, vtable_index);
+    return CallGenerator::for_virtual_call(callee, vtable_index);
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
     // or it is a static or special call.
-    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
+    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
   }
 }
 
 // Return true for methods that shouldn't be inlined early so that
 // they are easier to analyze and optimize as intrinsics.
@@ -353 +346 @@
   bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
   bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
 
   // Find target being called
   bool             will_link;
-  ciMethod*        dest_method  = iter().get_method(will_link);
-  ciInstanceKlass* holder_klass = dest_method->holder();
+  ciMethod*        bc_callee    = iter().get_method(will_link);  // actual callee from bytecode
+  ciInstanceKlass* holder_klass = bc_callee->holder();
   ciKlass*         holder       = iter().get_declared_method_holder();
   ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
 
-  int nargs = dest_method->arg_size();
-  if (is_invokedynamic)  nargs -= 1;
-
   // uncommon-trap when callee is unloaded, uninitialized or will not link
   // bailout when too many arguments for register representation
-  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
+  if (!will_link || can_not_compile_call_site(bc_callee, klass)) {
 #ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
-      dest_method->print_name(); tty->cr();
+      bc_callee->print_name(); tty->cr();
     }
 #endif
     return;
   }
   assert(holder_klass->is_loaded(), "");
-  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
+  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
   // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
   // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
   assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
   // Note:  In the absence of miranda methods, an abstract class K can perform
   // an invokevirtual directly on an interface method I.m if K implements I.
+
+  const int nargs = bc_callee->arg_size();
+
+  // Push appendix argument (MethodType, CallSite, etc.), if one.
+  if (iter().has_appendix()) {
+    ciObject* appendix_arg = iter().get_appendix();
+    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
+    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
+    push(appendix_arg_node);
+  }
 
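The appendix push is new JSR 292 plumbing: a resolved invokehandle/invokedynamic site carries an extra constant (a MethodType, bound CallSite object, or similar) that the JVM appends after the user-visible arguments, and the parser materializes it as a constant node on the expression stack. The convention, reduced to a plain C++ model with hypothetical names:

    #include <vector>

    struct Value { const void* oop; };  // stand-in for a stack slot

    void push_call_arguments(std::vector<Value>& expr_stack,
                             const std::vector<Value>& user_args,
                             const Value* appendix) {  // may be null
      for (const Value& v : user_args)
        expr_stack.push_back(v);
      if (appendix != nullptr)            // iter().has_appendix() analogue
        expr_stack.push_back(*appendix);  // trailing argument the MH linker expects
    }
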
@@ -385 +385 @@
   // ---------------------
   // Does Class Hierarchy Analysis reveal only a single target of a v-call?
   // Then we may inline or make a static call, but become dependent on there being only 1 target.
   // Does the call-site type profile reveal only one receiver?
@@ -390 +390 @@
   // The other path may uncommon_trap, check for another receiver, or do a v-call.
 
   // Choose call strategy.
   bool call_is_virtual = is_virtual_or_interface;
   int vtable_index = methodOopDesc::invalid_vtable_index;
-  ciMethod* call_method = dest_method;
+  ciMethod* callee = bc_callee;
 
   // Try to get the most accurate receiver type
   if (is_virtual_or_interface) {
     Node*             receiver_node = stack(sp() - nargs);
     const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
-    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
+    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type);
 
     // Has the call been sufficiently improved such that it is no longer a virtual?
     if (optimized_virtual_method != NULL) {
-      call_method     = optimized_virtual_method;
+      callee          = optimized_virtual_method;
       call_is_virtual = false;
-    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
+    } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
       // We can make a vtable call at this site
-      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
+      vtable_index = callee->resolve_vtable_index(method()->holder(), klass);
     }
   }
 
   // Note:  It's OK to try to inline a virtual call.
   // The call generator will not attempt to inline a polymorphic call
   // unless it knows how to optimize the receiver dispatch.
   bool try_inline = (C->do_inlining() || InlineAccessors);
 
   // ---------------------
-  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
+  dec_sp(nargs);                // Temporarily pop args for JVM state of call
   JVMState* jvms = sync_jvms();
 
   // ---------------------
   // Decide call tactic.
   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
   // It decides whether inlining is desirable or not.
-  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+  CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+
+  bc_callee = callee = NULL;  // don't use bc_callee and callee after this point
 
   // ---------------------
   // Round double arguments before call
-  round_double_arguments(dest_method);
+  round_double_arguments(cg->method());
 
 #ifndef PRODUCT
   // bump global counters for calls
-  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());
+  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
 
   // Record first part of parsing work for this call
   parse_histogram()->record_change();
 #endif // not PRODUCT
 
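Two small things happen at the stack-pointer line: inc_sp(-nargs) becomes dec_sp(nargs) (the same operation, with the direction made explicit), and the reason for the pop is that the JVM state captured by sync_jvms() must describe the expression stack without the outgoing arguments. A toy model of that bookkeeping; the fields are hypothetical simplifications of the parser state:

    struct ParserStackModel {
      int sp = 0;           // expression-stack depth
      int snapshot_sp = 0;  // what sync_jvms() would record

      void snapshot_call_state(int nargs) {
        sp -= nargs;        // dec_sp(nargs): args are consumed by the call
        snapshot_sp = sp;   // the call's JVM state sees the trimmed stack
      }
    };
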
@@ -445 +447 @@
 
   // Bump method data counters (We profile *before* the call is made
   // because exceptions don't return to the call site.)
   profile_call(receiver);
 
-  JVMState* new_jvms;
-  if ((new_jvms = cg->generate(jvms)) == NULL) {
+  JVMState* new_jvms = cg->generate(jvms);
+  if (new_jvms == NULL) {
     // When inlining attempt fails (e.g., too many arguments),
     // it may contaminate the current compile state, making it
     // impossible to pull back and try again.  Once we call
     // cg->generate(), we are committed.  If it fails, the whole
     // compilation task is compromised.
     if (failing())  return;
-#ifndef PRODUCT
-    if (PrintOpto || PrintOptoInlining || PrintInlining) {
-      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
-      if (cg->is_intrinsic() && call_method->code_size() > 0) {
-        tty->print("Bailed out of intrinsic, will not inline: ");
-        call_method->print_name(); tty->cr();
-      }
-    }
-#endif
+
     // This can happen if a library intrinsic is available, but refuses
     // the call site, perhaps because it did not match a pattern the
-    // intrinsic was expecting to optimize.  The fallback position is
-    // to call out-of-line.
-    try_inline = false;  // Inline tactic bailed out.
-    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+    // intrinsic was expecting to optimize.  Should always be possible to
+    // get a normal java call that may inline in that case
+    cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
     if ((new_jvms = cg->generate(jvms)) == NULL) {
       guarantee(failing(), "call failed to generate: calls should work");
       return;
     }
   }
 
   if (cg->is_inline()) {
     // Accumulate has_loops estimate
-    C->set_has_loops(C->has_loops() || call_method->has_loops());
-    C->env()->notice_inlined_method(call_method);
+    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
+    C->env()->notice_inlined_method(cg->method());
   }
 
   // Reset parser state from [new_]jvms, which now carries results of the call.
   // Return value (if any) is already pushed on the stack by the cg.
   add_exception_states_from(new_jvms);
@@ -499 +492 @@
     Node* cast = cast_not_null(receiver);
     // %%% assert(receiver == cast, "should already have cast the receiver");
   }
 
   // Round double result after a call from strict to non-strict code
-  round_double_result(dest_method);
+  round_double_result(cg->method());
+
+  ciType* rtype = cg->method()->return_type();
+  if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) {
+    // Be careful here with return types.
+    ciType* ctype = iter().get_declared_method_signature()->return_type();
+    if (ctype != rtype) {
+      BasicType rt = rtype->basic_type();
+      BasicType ct = ctype->basic_type();
+      Node* retnode = peek();
+      if (ct == T_VOID) {
+        // It's OK for a method to return a value that is discarded.
+        // The discarding does not require any special action from the caller.
+        // The Java code knows this, at VerifyType.isNullConversion.
+        pop_node(rt);  // whatever it was, pop it
+        retnode = top();
+      } else if (rt == T_INT || is_subword_type(rt)) {
+        // FIXME: This logic should be factored out.
+        if (ct == T_BOOLEAN) {
+          retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0x1)) );
+        } else if (ct == T_CHAR) {
+          retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0xFFFF)) );
+        } else if (ct == T_BYTE) {
+          retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(24)) );
+          retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(24)) );
+        } else if (ct == T_SHORT) {
+          retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(16)) );
+          retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(16)) );
+        } else {
+          assert(ct == T_INT, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
+        }
+      } else if (rt == T_OBJECT || rt == T_ARRAY) {
+        assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
+        if (ctype->is_loaded()) {
+          Node* if_fail = top();
+          retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail);
+          if (if_fail != top()) {
+            PreserveJVMState pjvms(this);
+            set_control(if_fail);
+            builtin_throw(Deoptimization::Reason_class_check);
+          }
+          pop();
+          push(retnode);
+        }
+      } else {
+        assert(ct == rt, err_msg_res("unexpected mismatch rt=%d, ct=%d", rt, ct));
+        // push a zero; it's better than getting an oop/int mismatch
+        retnode = pop_node(rt);
+        retnode = zerocon(ct);
+        push_node(ct, retnode);
+      }
+      // Now that the value is well-behaved, continue with the call-site type.
+      rtype = ctype;
+    }
+  }
 
   // If the return type of the method is not loaded, assert that the
   // value we got is a null.  Otherwise, we need to recompile.
-  if (!dest_method->return_type()->is_loaded()) {
+  if (!rtype->is_loaded()) {
 #ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
-      dest_method->print_name(); tty->cr();
+      cg->method()->print_name(); tty->cr();
     }
 #endif
     if (C->log() != NULL) {
       C->log()->elem("assert_null reason='return' klass='%d'",
-                     C->log()->identify(dest_method->return_type()));
+                     C->log()->identify(rtype));
     }
     // If there is going to be a trap, put it at the next bytecode:
     set_bci(iter().next_bci());
     do_null_assert(peek(), T_OBJECT);
     set_bci(iter().cur_bci()); // put it back
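The return-type massaging above exists because a method handle target's raw return type can differ from the type the call site declares; for subword types the int result must be re-normalized. What the AndI/LShiftI/RShiftI node pairs compute, as self-contained C++ (the shift pairs are written as the equivalent narrowing casts to stay well-defined):

    #include <cassert>
    #include <cstdint>

    int to_boolean(int x) { return x & 0x1; }                  // AndI 0x1
    int to_char(int x)    { return x & 0xFFFF; }               // AndI 0xFFFF (zero-extend)
    int to_byte(int x)    { return static_cast<int8_t>(x); }   // == (x << 24) >> 24
    int to_short(int x)   { return static_cast<int16_t>(x); }  // == (x << 16) >> 16

    int main() {
      assert(to_byte(0x1FF) == -1);     // low 8 bits 0xFF, sign-extended
      assert(to_short(0x1FFFF) == -1);  // low 16 bits 0xFFFF, sign-extended
      assert(to_char(-1) == 0xFFFF);    // char is unsigned 16-bit
      return 0;
    }
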
@@ -592 +639 @@
   if (saw_unloaded->contains(handler_bci)) {
     // An unloaded exception type is coming here.  Do an uncommon trap.
 #ifndef PRODUCT
     // We do not expect the same handler bci to take both cold unloaded
     // and hot loaded exceptions.  But, watch for it.
-    if (extype->is_loaded()) {
-      tty->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ");
+    if ((Verbose || WizardMode) && extype->is_loaded()) {
+      tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
       method()->print_name(); tty->cr();
     } else if (PrintOpto && (Verbose || WizardMode)) {
       tty->print("Bailing out on unloaded exception type ");
       extype->klass()->print_name();
       tty->print(" at bci:%d in ", bci());
@@ -787 +834 @@
 void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
   if( CountCompiledCalls ) {
     if( at_method_entry ) {
       // bump invocation counter if top method (for statistics)
       if (CountCompiledCalls && depth() == 1) {
-        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
+        const TypeOopPtr* addr_type = TypeOopPtr::make_from_constant(method());
         Node* adr1 = makecon(addr_type);
         Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
         increment_counter(adr2);
       }
     } else if (is_inline) {
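The final hunk only retypes the constant method pointer (TypeOopPtr::make_from_constant() instead of TypeInstPtr::make()); the statistics bump itself is still "constant base plus field offset, then increment". That address arithmetic in plain C++; the counter's exact field type is a hypothetical simplification:

    #include <cstddef>

    // Model of increment_counter(adr2): adr2 = base + byte offset of the
    // counter field inside the object; statistics only, so no atomicity.
    void increment_counter_at(void* base, std::ptrdiff_t byte_offset) {
      int* counter = reinterpret_cast<int*>(static_cast<char*>(base) + byte_offset);
      *counter = *counter + 1;
    }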