Mercurial > hg > truffle
comparison src/share/vm/opto/doCall.cpp @ 7478:5698813d45eb
8005418: JSR 292: virtual dispatch bug in 292 impl
Reviewed-by: jrose, kvn
author | twisti |
---|---|
date | Wed, 09 Jan 2013 15:37:23 -0800 |
parents | d092d1b31229 |
children | f1de9dbc914e |
comparison
equal
deleted
inserted
replaced
7477:038dd2875b94 | 7478:5698813d45eb |
---|---|
59 out->print(ss.as_string()); | 59 out->print(ss.as_string()); |
60 out->cr(); | 60 out->cr(); |
61 } | 61 } |
62 } | 62 } |
63 | 63 |
64 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual, | 64 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch, |
65 JVMState* jvms, bool allow_inline, | 65 JVMState* jvms, bool allow_inline, |
66 float prof_factor, bool allow_intrinsics, bool delayed_forbidden) { | 66 float prof_factor, bool allow_intrinsics, bool delayed_forbidden) { |
67 ciMethod* caller = jvms->method(); | 67 ciMethod* caller = jvms->method(); |
68 int bci = jvms->bci(); | 68 int bci = jvms->bci(); |
69 Bytecodes::Code bytecode = caller->java_code_at_bci(bci); | 69 Bytecodes::Code bytecode = caller->java_code_at_bci(bci); |
80 ciCallProfile profile = caller->call_profile_at_bci(bci); | 80 ciCallProfile profile = caller->call_profile_at_bci(bci); |
81 | 81 |
82 // See how many times this site has been invoked. | 82 // See how many times this site has been invoked. |
83 int site_count = profile.count(); | 83 int site_count = profile.count(); |
84 int receiver_count = -1; | 84 int receiver_count = -1; |
85 if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) { | 85 if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) { |
86 // Receivers in the profile structure are ordered by call counts | 86 // Receivers in the profile structure are ordered by call counts |
87 // so that the most called (major) receiver is profile.receiver(0). | 87 // so that the most called (major) receiver is profile.receiver(0). |
88 receiver_count = profile.receiver_count(0); | 88 receiver_count = profile.receiver_count(0); |
89 } | 89 } |
90 | 90 |
92 if (log != NULL) { | 92 if (log != NULL) { |
93 int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; | 93 int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; |
94 int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1; | 94 int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1; |
95 log->begin_elem("call method='%d' count='%d' prof_factor='%g'", | 95 log->begin_elem("call method='%d' count='%d' prof_factor='%g'", |
96 log->identify(callee), site_count, prof_factor); | 96 log->identify(callee), site_count, prof_factor); |
97 if (call_is_virtual) log->print(" virtual='1'"); | 97 if (call_does_dispatch) log->print(" virtual='1'"); |
98 if (allow_inline) log->print(" inline='1'"); | 98 if (allow_inline) log->print(" inline='1'"); |
99 if (receiver_count >= 0) { | 99 if (receiver_count >= 0) { |
100 log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count); | 100 log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count); |
101 if (profile.has_receiver(1)) { | 101 if (profile.has_receiver(1)) { |
102 log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1)); | 102 log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1)); |
109 // methods. If these methods are replaced with specialized code, | 109 // methods. If these methods are replaced with specialized code, |
110 // then we return it as the inlined version of the call. | 110 // then we return it as the inlined version of the call. |
111 // We do this before the strict f.p. check below because the | 111 // We do this before the strict f.p. check below because the |
112 // intrinsics handle strict f.p. correctly. | 112 // intrinsics handle strict f.p. correctly. |
113 if (allow_inline && allow_intrinsics) { | 113 if (allow_inline && allow_intrinsics) { |
114 CallGenerator* cg = find_intrinsic(callee, call_is_virtual); | 114 CallGenerator* cg = find_intrinsic(callee, call_does_dispatch); |
115 if (cg != NULL) { | 115 if (cg != NULL) { |
116 if (cg->is_predicted()) { | 116 if (cg->is_predicted()) { |
117 // Code without intrinsic but, hopefully, inlined. | 117 // Code without intrinsic but, hopefully, inlined. |
118 CallGenerator* inline_cg = this->call_generator(callee, | 118 CallGenerator* inline_cg = this->call_generator(callee, |
119 vtable_index, call_is_virtual, jvms, allow_inline, prof_factor, false); | 119 vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false); |
120 if (inline_cg != NULL) { | 120 if (inline_cg != NULL) { |
121 cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); | 121 cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); |
122 } | 122 } |
123 } | 123 } |
124 return cg; | 124 return cg; |
129 // NOTE: This must happen before normal inlining logic below since | 129 // NOTE: This must happen before normal inlining logic below since |
130 // MethodHandle.invoke* are native methods which obviously don't | 130 // MethodHandle.invoke* are native methods which obviously don't |
131 // have bytecodes and so normal inlining fails. | 131 // have bytecodes and so normal inlining fails. |
132 if (callee->is_method_handle_intrinsic()) { | 132 if (callee->is_method_handle_intrinsic()) { |
133 CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden); | 133 CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden); |
134 assert (cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator"); | 134 assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator"); |
135 return cg; | 135 return cg; |
136 } | 136 } |
137 | 137 |
138 // Do not inline strict fp into non-strict code, or the reverse | 138 // Do not inline strict fp into non-strict code, or the reverse |
139 if (caller->is_strict() ^ callee->is_strict()) { | 139 if (caller->is_strict() ^ callee->is_strict()) { |
147 float past_uses = jvms->method()->scale_count(site_count, prof_factor); | 147 float past_uses = jvms->method()->scale_count(site_count, prof_factor); |
148 // This is the number of times we expect the call code to be used. | 148 // This is the number of times we expect the call code to be used. |
149 float expected_uses = past_uses; | 149 float expected_uses = past_uses; |
150 | 150 |
151 // Try inlining a bytecoded method: | 151 // Try inlining a bytecoded method: |
152 if (!call_is_virtual) { | 152 if (!call_does_dispatch) { |
153 InlineTree* ilt; | 153 InlineTree* ilt; |
154 if (UseOldInlining) { | 154 if (UseOldInlining) { |
155 ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method()); | 155 ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method()); |
156 } else { | 156 } else { |
157 // Make a disembodied, stateless ILT. | 157 // Make a disembodied, stateless ILT. |
186 if (cg == NULL || should_delay) { | 186 if (cg == NULL || should_delay) { |
187 // Fall through. | 187 // Fall through. |
188 } else if (require_inline || !InlineWarmCalls) { | 188 } else if (require_inline || !InlineWarmCalls) { |
189 return cg; | 189 return cg; |
190 } else { | 190 } else { |
191 CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor); | 191 CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor); |
192 return CallGenerator::for_warm_call(ci, cold_cg, cg); | 192 return CallGenerator::for_warm_call(ci, cold_cg, cg); |
193 } | 193 } |
194 } | 194 } |
195 } | 195 } |
196 | 196 |
197 // Try using the type profile. | 197 // Try using the type profile. |
198 if (call_is_virtual && site_count > 0 && receiver_count > 0) { | 198 if (call_does_dispatch && site_count > 0 && receiver_count > 0) { |
199 // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count. | 199 // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count. |
200 bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent); | 200 bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent); |
201 ciMethod* receiver_method = NULL; | 201 ciMethod* receiver_method = NULL; |
202 if (have_major_receiver || profile.morphism() == 1 || | 202 if (have_major_receiver || profile.morphism() == 1 || |
203 (profile.morphism() == 2 && UseBimorphicInlining)) { | 203 (profile.morphism() == 2 && UseBimorphicInlining)) { |
207 profile.receiver(0)); | 207 profile.receiver(0)); |
208 } | 208 } |
209 if (receiver_method != NULL) { | 209 if (receiver_method != NULL) { |
210 // The single majority receiver sufficiently outweighs the minority. | 210 // The single majority receiver sufficiently outweighs the minority. |
211 CallGenerator* hit_cg = this->call_generator(receiver_method, | 211 CallGenerator* hit_cg = this->call_generator(receiver_method, |
212 vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor); | 212 vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor); |
213 if (hit_cg != NULL) { | 213 if (hit_cg != NULL) { |
214 // Look up second receiver. | 214 // Look up second receiver. |
215 CallGenerator* next_hit_cg = NULL; | 215 CallGenerator* next_hit_cg = NULL; |
216 ciMethod* next_receiver_method = NULL; | 216 ciMethod* next_receiver_method = NULL; |
217 if (profile.morphism() == 2 && UseBimorphicInlining) { | 217 if (profile.morphism() == 2 && UseBimorphicInlining) { |
218 next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), | 218 next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), |
219 profile.receiver(1)); | 219 profile.receiver(1)); |
220 if (next_receiver_method != NULL) { | 220 if (next_receiver_method != NULL) { |
221 next_hit_cg = this->call_generator(next_receiver_method, | 221 next_hit_cg = this->call_generator(next_receiver_method, |
222 vtable_index, !call_is_virtual, jvms, | 222 vtable_index, !call_does_dispatch, jvms, |
223 allow_inline, prof_factor); | 223 allow_inline, prof_factor); |
224 if (next_hit_cg != NULL && !next_hit_cg->is_inline() && | 224 if (next_hit_cg != NULL && !next_hit_cg->is_inline() && |
225 have_major_receiver && UseOnlyInlinedBimorphic) { | 225 have_major_receiver && UseOnlyInlinedBimorphic) { |
226 // Skip if we can't inline second receiver's method | 226 // Skip if we can't inline second receiver's method |
227 next_hit_cg = NULL; | 227 next_hit_cg = NULL; |
263 } | 263 } |
264 } | 264 } |
265 | 265 |
266 // There was no special inlining tactic, or it bailed out. | 266 // There was no special inlining tactic, or it bailed out. |
267 // Use a more generic tactic, like a simple call. | 267 // Use a more generic tactic, like a simple call. |
268 if (call_is_virtual) { | 268 if (call_does_dispatch) { |
269 return CallGenerator::for_virtual_call(callee, vtable_index); | 269 return CallGenerator::for_virtual_call(callee, vtable_index); |
270 } else { | 270 } else { |
271 // Class Hierarchy Analysis or Type Profile reveals a unique target, | 271 // Class Hierarchy Analysis or Type Profile reveals a unique target, |
272 // or it is a static or special call. | 272 // or it is a static or special call. |
273 return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms)); | 273 return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms)); |
395 // an invokevirtual directly on an interface method I.m if K implements I. | 395 // an invokevirtual directly on an interface method I.m if K implements I. |
396 | 396 |
397 // orig_callee is the resolved callee whose signature includes the | 397 // orig_callee is the resolved callee whose signature includes the |
398 // appendix argument. | 398 // appendix argument. |
399 const int nargs = orig_callee->arg_size(); | 399 const int nargs = orig_callee->arg_size(); |
400 const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id()); | |
400 | 401 |
401 // Push appendix argument (MethodType, CallSite, etc.), if one. | 402 // Push appendix argument (MethodType, CallSite, etc.), if one. |
402 if (iter().has_appendix()) { | 403 if (iter().has_appendix()) { |
403 ciObject* appendix_arg = iter().get_appendix(); | 404 ciObject* appendix_arg = iter().get_appendix(); |
404 const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg); | 405 const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg); |
411 // Then we may inline or make a static call, but become dependent on there being only 1 target. | 412 // Then we may inline or make a static call, but become dependent on there being only 1 target. |
412 // Does the call-site type profile reveal only one receiver? | 413 // Does the call-site type profile reveal only one receiver? |
413 // Then we may introduce a run-time check and inline on the path where it succeeds. | 414 // Then we may introduce a run-time check and inline on the path where it succeeds. |
414 // The other path may uncommon_trap, check for another receiver, or do a v-call. | 415 // The other path may uncommon_trap, check for another receiver, or do a v-call. |
415 | 416 |
416 // Choose call strategy. | |
417 bool call_is_virtual = is_virtual_or_interface; | |
418 int vtable_index = Method::invalid_vtable_index; | |
419 ciMethod* callee = orig_callee; | |
420 | |
421 // Try to get the most accurate receiver type | 417 // Try to get the most accurate receiver type |
418 ciMethod* callee = orig_callee; | |
419 int vtable_index = Method::invalid_vtable_index; | |
420 bool call_does_dispatch = false; | |
421 | |
422 if (is_virtual_or_interface) { | 422 if (is_virtual_or_interface) { |
423 Node* receiver_node = stack(sp() - nargs); | 423 Node* receiver_node = stack(sp() - nargs); |
424 const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); | 424 const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); |
425 ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, orig_callee, receiver_type); | 425 // call_does_dispatch and vtable_index are out-parameters. They might be changed. |
426 | 426 callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type, |
427 // Has the call been sufficiently improved such that it is no longer a virtual? | 427 is_virtual, |
428 if (optimized_virtual_method != NULL) { | 428 call_does_dispatch, vtable_index); // out-parameters |
429 callee = optimized_virtual_method; | |
430 call_is_virtual = false; | |
431 } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) { | |
432 // We can make a vtable call at this site | |
433 vtable_index = callee->resolve_vtable_index(method()->holder(), klass); | |
434 } | |
435 } | 429 } |
436 | 430 |
437 // Note: It's OK to try to inline a virtual call. | 431 // Note: It's OK to try to inline a virtual call. |
438 // The call generator will not attempt to inline a polymorphic call | 432 // The call generator will not attempt to inline a polymorphic call |
439 // unless it knows how to optimize the receiver dispatch. | 433 // unless it knows how to optimize the receiver dispatch. |
445 | 439 |
446 // --------------------- | 440 // --------------------- |
447 // Decide call tactic. | 441 // Decide call tactic. |
448 // This call checks with CHA, the interpreter profile, intrinsics table, etc. | 442 // This call checks with CHA, the interpreter profile, intrinsics table, etc. |
449 // It decides whether inlining is desirable or not. | 443 // It decides whether inlining is desirable or not. |
450 CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); | 444 CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor()); |
451 | 445 |
452 // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. | 446 // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. |
453 orig_callee = callee = NULL; | 447 orig_callee = callee = NULL; |
454 | 448 |
455 // --------------------- | 449 // --------------------- |
485 | 479 |
486 // This can happen if a library intrinsic is available, but refuses | 480 // This can happen if a library intrinsic is available, but refuses |
487 // the call site, perhaps because it did not match a pattern the | 481 // the call site, perhaps because it did not match a pattern the |
488 // intrinsic was expecting to optimize. Should always be possible to | 482 // intrinsic was expecting to optimize. Should always be possible to |
489 // get a normal java call that may inline in that case | 483 // get a normal java call that may inline in that case |
490 cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false); | 484 cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false); |
491 if ((new_jvms = cg->generate(jvms)) == NULL) { | 485 if ((new_jvms = cg->generate(jvms)) == NULL) { |
492 guarantee(failing(), "call failed to generate: calls should work"); | 486 guarantee(failing(), "call failed to generate: calls should work"); |
493 return; | 487 return; |
494 } | 488 } |
495 } | 489 } |
520 | 514 |
521 // Round double result after a call from strict to non-strict code | 515 // Round double result after a call from strict to non-strict code |
522 round_double_result(cg->method()); | 516 round_double_result(cg->method()); |
523 | 517 |
524 ciType* rtype = cg->method()->return_type(); | 518 ciType* rtype = cg->method()->return_type(); |
525 if (Bytecodes::has_optional_appendix(iter().cur_bc_raw())) { | 519 ciType* ctype = declared_signature->return_type(); |
520 | |
521 if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) { | |
526 // Be careful here with return types. | 522 // Be careful here with return types. |
527 ciType* ctype = declared_signature->return_type(); | |
528 if (ctype != rtype) { | 523 if (ctype != rtype) { |
529 BasicType rt = rtype->basic_type(); | 524 BasicType rt = rtype->basic_type(); |
530 BasicType ct = ctype->basic_type(); | 525 BasicType ct = ctype->basic_type(); |
531 Node* retnode = peek(); | |
532 if (ct == T_VOID) { | 526 if (ct == T_VOID) { |
533 // It's OK for a method to return a value that is discarded. | 527 // It's OK for a method to return a value that is discarded. |
534 // The discarding does not require any special action from the caller. | 528 // The discarding does not require any special action from the caller. |
535 // The Java code knows this, at VerifyType.isNullConversion. | 529 // The Java code knows this, at VerifyType.isNullConversion. |
536 pop_node(rt); // whatever it was, pop it | 530 pop_node(rt); // whatever it was, pop it |
537 retnode = top(); | |
538 } else if (rt == T_INT || is_subword_type(rt)) { | 531 } else if (rt == T_INT || is_subword_type(rt)) { |
539 // FIXME: This logic should be factored out. | 532 // Nothing. These cases are handled in lambda form bytecode. |
540 if (ct == T_BOOLEAN) { | 533 assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct))); |
541 retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0x1)) ); | |
542 } else if (ct == T_CHAR) { | |
543 retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) ); | |
544 } else if (ct == T_BYTE) { | |
545 retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) ); | |
546 retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) ); | |
547 } else if (ct == T_SHORT) { | |
548 retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) ); | |
549 retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) ); | |
550 } else { | |
551 assert(ct == T_INT, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct))); | |
552 } | |
553 } else if (rt == T_OBJECT || rt == T_ARRAY) { | 534 } else if (rt == T_OBJECT || rt == T_ARRAY) { |
554 assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct))); | 535 assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct))); |
555 if (ctype->is_loaded()) { | 536 if (ctype->is_loaded()) { |
556 const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); | 537 const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); |
557 const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); | 538 const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); |
558 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { | 539 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { |
540 Node* retnode = pop(); | |
559 Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type)); | 541 Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type)); |
560 pop(); | |
561 push(cast_obj); | 542 push(cast_obj); |
562 } | 543 } |
563 } | 544 } |
564 } else { | 545 } else { |
565 assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct)); | 546 assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct))); |
566 // push a zero; it's better than getting an oop/int mismatch | 547 // push a zero; it's better than getting an oop/int mismatch |
567 retnode = pop_node(rt); | 548 pop_node(rt); |
568 retnode = zerocon(ct); | 549 Node* retnode = zerocon(ct); |
569 push_node(ct, retnode); | 550 push_node(ct, retnode); |
570 } | 551 } |
571 // Now that the value is well-behaved, continue with the call-site type. | 552 // Now that the value is well-behaved, continue with the call-site type. |
572 rtype = ctype; | 553 rtype = ctype; |
573 } | 554 } |
555 } else { | |
556 assert(rtype == ctype, "mismatched return types"); // symbolic resolution enforces this | |
574 } | 557 } |
575 | 558 |
576 // If the return type of the method is not loaded, assert that the | 559 // If the return type of the method is not loaded, assert that the |
577 // value we got is a null. Otherwise, we need to recompile. | 560 // value we got is a null. Otherwise, we need to recompile. |
578 if (!rtype->is_loaded()) { | 561 if (!rtype->is_loaded()) { |
886 } | 869 } |
887 } | 870 } |
888 #endif //PRODUCT | 871 #endif //PRODUCT |
889 | 872 |
890 | 873 |
874 ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass, | |
875 ciMethod* callee, const TypeOopPtr* receiver_type, | |
876 bool is_virtual, | |
877 bool& call_does_dispatch, int& vtable_index) { | |
878 // Set default values for out-parameters. | |
879 call_does_dispatch = true; | |
880 vtable_index = Method::invalid_vtable_index; | |
881 | |
882 // Choose call strategy. | |
883 ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type); | |
884 | |
885 // Has the call been sufficiently improved such that it is no longer a virtual? |
886 if (optimized_virtual_method != NULL) { | |
887 callee = optimized_virtual_method; | |
888 call_does_dispatch = false; | |
889 } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) { | |
890 // We can make a vtable call at this site | |
891 vtable_index = callee->resolve_vtable_index(caller->holder(), klass); | |
892 } | |
893 return callee; | |
894 } | |
895 | |
891 // Identify possible target method and inlining style | 896 // Identify possible target method and inlining style |
892 ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, | 897 ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, |
893 ciMethod *dest_method, const TypeOopPtr* receiver_type) { | 898 ciMethod* callee, const TypeOopPtr* receiver_type) { |
894 // only use for virtual or interface calls | 899 // only use for virtual or interface calls |
895 | 900 |
896 // If it is obviously final, do not bother to call find_monomorphic_target, | 901 // If it is obviously final, do not bother to call find_monomorphic_target, |
897 // because the class hierarchy checks are not needed, and may fail due to | 902 // because the class hierarchy checks are not needed, and may fail due to |
898 // incompletely loaded classes. Since we do our own class loading checks | 903 // incompletely loaded classes. Since we do our own class loading checks |
899 // in this module, we may confidently bind to any method. | 904 // in this module, we may confidently bind to any method. |
900 if (dest_method->can_be_statically_bound()) { | 905 if (callee->can_be_statically_bound()) { |
901 return dest_method; | 906 return callee; |
902 } | 907 } |
903 | 908 |
904 // Attempt to improve the receiver | 909 // Attempt to improve the receiver |
905 bool actual_receiver_is_exact = false; | 910 bool actual_receiver_is_exact = false; |
906 ciInstanceKlass* actual_receiver = klass; | 911 ciInstanceKlass* actual_receiver = klass; |
907 if (receiver_type != NULL) { | 912 if (receiver_type != NULL) { |
908 // Array methods are all inherited from Object, and are monomorphic. | 913 // Array methods are all inherited from Object, and are monomorphic. |
909 if (receiver_type->isa_aryptr() && | 914 if (receiver_type->isa_aryptr() && |
910 dest_method->holder() == env()->Object_klass()) { | 915 callee->holder() == env()->Object_klass()) { |
911 return dest_method; | 916 return callee; |
912 } | 917 } |
913 | 918 |
914 // All other interesting cases are instance klasses. | 919 // All other interesting cases are instance klasses. |
915 if (!receiver_type->isa_instptr()) { | 920 if (!receiver_type->isa_instptr()) { |
916 return NULL; | 921 return NULL; |
926 actual_receiver_is_exact = receiver_type->klass_is_exact(); | 931 actual_receiver_is_exact = receiver_type->klass_is_exact(); |
927 } | 932 } |
928 } | 933 } |
929 | 934 |
930 ciInstanceKlass* calling_klass = caller->holder(); | 935 ciInstanceKlass* calling_klass = caller->holder(); |
931 ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver); | 936 ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver); |
932 if (cha_monomorphic_target != NULL) { | 937 if (cha_monomorphic_target != NULL) { |
933 assert(!cha_monomorphic_target->is_abstract(), ""); | 938 assert(!cha_monomorphic_target->is_abstract(), ""); |
934 // Look at the method-receiver type. Does it add "too much information"? | 939 // Look at the method-receiver type. Does it add "too much information"? |
935 ciKlass* mr_klass = cha_monomorphic_target->holder(); | 940 ciKlass* mr_klass = cha_monomorphic_target->holder(); |
936 const Type* mr_type = TypeInstPtr::make(TypePtr::BotPTR, mr_klass); | 941 const Type* mr_type = TypeInstPtr::make(TypePtr::BotPTR, mr_klass); |
944 tty->print_cr("found unique CHA method, but could not cast up"); | 949 tty->print_cr("found unique CHA method, but could not cast up"); |
945 tty->print(" method = "); | 950 tty->print(" method = "); |
946 cha_monomorphic_target->print(); | 951 cha_monomorphic_target->print(); |
947 tty->cr(); | 952 tty->cr(); |
948 } | 953 } |
949 if (C->log() != NULL) { | 954 if (log() != NULL) { |
950 C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'", | 955 log()->elem("missed_CHA_opportunity klass='%d' method='%d'", |
951 C->log()->identify(klass), | 956 log()->identify(klass), |
952 C->log()->identify(cha_monomorphic_target)); | 957 log()->identify(cha_monomorphic_target)); |
953 } | 958 } |
954 cha_monomorphic_target = NULL; | 959 cha_monomorphic_target = NULL; |
955 } | 960 } |
956 } | 961 } |
957 if (cha_monomorphic_target != NULL) { | 962 if (cha_monomorphic_target != NULL) { |
959 // If we inlined because CHA revealed only a single target method, | 964 // If we inlined because CHA revealed only a single target method, |
960 // then we are dependent on that target method not getting overridden | 965 // then we are dependent on that target method not getting overridden |
961 // by dynamic class loading. Be sure to test the "static" receiver | 966 // by dynamic class loading. Be sure to test the "static" receiver |
962 // dest_method here, as opposed to the actual receiver, which may | 967 // callee here, as opposed to the actual receiver, which may |
963 // falsely lead us to believe that the receiver is final or private. | 968 // falsely lead us to believe that the receiver is final or private. |
964 C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target); | 969 dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target); |
965 return cha_monomorphic_target; | 970 return cha_monomorphic_target; |
966 } | 971 } |
967 | 972 |
968 // If the type is exact, we can still bind the method w/o a vcall. | 973 // If the type is exact, we can still bind the method w/o a vcall. |
969 // (This case comes after CHA so we can see how much extra work it does.) | 974 // (This case comes after CHA so we can see how much extra work it does.) |
970 if (actual_receiver_is_exact) { | 975 if (actual_receiver_is_exact) { |
971 // In case of evolution, there is a dependence on every inlined method, since each | 976 // In case of evolution, there is a dependence on every inlined method, since each |
972 // such method can be changed when its class is redefined. | 977 // such method can be changed when its class is redefined. |
973 ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver); | 978 ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver); |
974 if (exact_method != NULL) { | 979 if (exact_method != NULL) { |
975 #ifndef PRODUCT | 980 #ifndef PRODUCT |
976 if (PrintOpto) { | 981 if (PrintOpto) { |
977 tty->print(" Calling method via exact type @%d --- ", bci); | 982 tty->print(" Calling method via exact type @%d --- ", bci); |
978 exact_method->print_name(); | 983 exact_method->print_name(); |