Mercurial > hg > truffle
comparison src/share/vm/opto/callGenerator.cpp @ 6275:957c266d8bc5
Merge with http://hg.openjdk.java.net/hsx/hsx24/hotspot/
author | Doug Simon <doug.simon@oracle.com> |
---|---|
date | Tue, 21 Aug 2012 10:39:19 +0200 |
parents | 6c5b7a6becc8 |
children | 7a302948f5a4 |
comparison
equal
deleted
inserted
replaced
5891:fd8832ae511d | 6275:957c266d8bc5 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
24 | 24 |
25 #include "precompiled.hpp" | 25 #include "precompiled.hpp" |
26 #include "ci/bcEscapeAnalyzer.hpp" | 26 #include "ci/bcEscapeAnalyzer.hpp" |
27 #include "ci/ciCallSite.hpp" | 27 #include "ci/ciCallSite.hpp" |
28 #include "ci/ciCPCache.hpp" | 28 #include "ci/ciCPCache.hpp" |
29 #include "ci/ciMemberName.hpp" | |
29 #include "ci/ciMethodHandle.hpp" | 30 #include "ci/ciMethodHandle.hpp" |
30 #include "classfile/javaClasses.hpp" | 31 #include "classfile/javaClasses.hpp" |
31 #include "compiler/compileLog.hpp" | 32 #include "compiler/compileLog.hpp" |
32 #include "opto/addnode.hpp" | 33 #include "opto/addnode.hpp" |
33 #include "opto/callGenerator.hpp" | 34 #include "opto/callGenerator.hpp" |
37 #include "opto/parse.hpp" | 38 #include "opto/parse.hpp" |
38 #include "opto/rootnode.hpp" | 39 #include "opto/rootnode.hpp" |
39 #include "opto/runtime.hpp" | 40 #include "opto/runtime.hpp" |
40 #include "opto/subnode.hpp" | 41 #include "opto/subnode.hpp" |
41 | 42 |
42 CallGenerator::CallGenerator(ciMethod* method) { | |
43 _method = method; | |
44 } | |
45 | 43 |
46 // Utility function. | 44 // Utility function. |
47 const TypeFunc* CallGenerator::tf() const { | 45 const TypeFunc* CallGenerator::tf() const { |
48 return TypeFunc::make(method()); | 46 return TypeFunc::make(method()); |
49 } | 47 } |
135 if (kit.C->log() != NULL) { | 133 if (kit.C->log() != NULL) { |
136 kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); | 134 kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); |
137 } | 135 } |
138 | 136 |
139 CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci()); | 137 CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci()); |
138 _call_node = call; // Save the call node in case we need it later | |
140 if (!is_static) { | 139 if (!is_static) { |
141 // Make an explicit receiver null_check as part of this call. | 140 // Make an explicit receiver null_check as part of this call. |
142 // Since we share a map with the caller, his JVMS gets adjusted. | 141 // Since we share a map with the caller, his JVMS gets adjusted. |
143 kit.null_check_receiver(method()); | 142 kit.null_check_receiver(method()); |
144 if (kit.stopped()) { | 143 if (kit.stopped()) { |
145 // And dump it back to the caller, decorated with any exceptions: | 144 // And dump it back to the caller, decorated with any exceptions: |
146 return kit.transfer_exceptions_into_jvms(); | 145 return kit.transfer_exceptions_into_jvms(); |
147 } | 146 } |
148 // Mark the call node as virtual, sort of: | 147 // Mark the call node as virtual, sort of: |
149 call->set_optimized_virtual(true); | 148 call->set_optimized_virtual(true); |
150 if (method()->is_method_handle_invoke()) { | 149 if (method()->is_method_handle_intrinsic() || |
150 method()->is_compiled_lambda_form()) { | |
151 call->set_method_handle_invoke(true); | 151 call->set_method_handle_invoke(true); |
152 } | 152 } |
153 } | 153 } |
154 kit.set_arguments_for_java_call(call); | 154 kit.set_arguments_for_java_call(call); |
155 kit.set_edges_for_java_call(call, false, _separate_io_proj); | 155 kit.set_edges_for_java_call(call, false, _separate_io_proj); |
156 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj); | 156 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj); |
157 kit.push_node(method()->return_type()->basic_type(), ret); | 157 kit.push_node(method()->return_type()->basic_type(), ret); |
158 _call_node = call; // Save the call node in case we need it later | |
159 return kit.transfer_exceptions_into_jvms(); | 158 return kit.transfer_exceptions_into_jvms(); |
160 } | 159 } |
161 | 160 |
162 //---------------------------DynamicCallGenerator----------------------------- | 161 //---------------------------DynamicCallGenerator----------------------------- |
163 // Internal class which handles all out-of-line invokedynamic calls. | 162 // Internal class which handles all out-of-line invokedynamic calls. |
170 virtual JVMState* generate(JVMState* jvms); | 169 virtual JVMState* generate(JVMState* jvms); |
171 }; | 170 }; |
172 | 171 |
173 JVMState* DynamicCallGenerator::generate(JVMState* jvms) { | 172 JVMState* DynamicCallGenerator::generate(JVMState* jvms) { |
174 GraphKit kit(jvms); | 173 GraphKit kit(jvms); |
175 | 174 Compile* C = kit.C; |
176 if (kit.C->log() != NULL) { | 175 PhaseGVN& gvn = kit.gvn(); |
177 kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci()); | 176 |
177 if (C->log() != NULL) { | |
178 C->log()->elem("dynamic_call bci='%d'", jvms->bci()); | |
178 } | 179 } |
179 | 180 |
180 // Get the constant pool cache from the caller class. | 181 // Get the constant pool cache from the caller class. |
181 ciMethod* caller_method = jvms->method(); | 182 ciMethod* caller_method = jvms->method(); |
182 ciBytecodeStream str(caller_method); | 183 ciBytecodeStream str(caller_method); |
188 // pointer. | 189 // pointer. |
189 int index = str.get_method_index(); | 190 int index = str.get_method_index(); |
190 size_t call_site_offset = cpcache->get_f1_offset(index); | 191 size_t call_site_offset = cpcache->get_f1_offset(index); |
191 | 192 |
192 // Load the CallSite object from the constant pool cache. | 193 // Load the CallSite object from the constant pool cache. |
193 const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache); | 194 const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT |
194 Node* cpcache_adr = kit.makecon(cpcache_ptr); | 195 const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass()); |
195 Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset); | 196 Node* cpcache_adr = kit.makecon(cpcache_type); |
196 Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); | 197 Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset); |
198 // The oops in the constant pool cache are not compressed; load them as raw pointers. | |
199 Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw); | |
197 | 200 |
198 // Load the target MethodHandle from the CallSite object. | 201 // Load the target MethodHandle from the CallSite object. |
199 Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); | 202 const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass()); |
200 Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT); | 203 Node* target_mh_adr = kit.basic_plus_adr(call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); |
204 Node* target_mh = kit.make_load(kit.control(), target_mh_adr, target_type, T_OBJECT); | |
201 | 205 |
202 address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub(); | 206 address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub(); |
203 | 207 |
204 CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci()); | 208 CallStaticJavaNode* call = new (C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci()); |
205 // invokedynamic is treated as an optimized invokevirtual. | 209 // invokedynamic is treated as an optimized invokevirtual. |
206 call->set_optimized_virtual(true); | 210 call->set_optimized_virtual(true); |
207 // Take extra care (in the presence of argument motion) not to trash the SP: | 211 // Take extra care (in the presence of argument motion) not to trash the SP: |
208 call->set_method_handle_invoke(true); | 212 call->set_method_handle_invoke(true); |
209 | 213 |
318 return new DirectCallGenerator(m, separate_io_proj); | 322 return new DirectCallGenerator(m, separate_io_proj); |
319 } | 323 } |
320 | 324 |
321 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { | 325 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { |
322 assert(!m->is_static(), "for_virtual_call mismatch"); | 326 assert(!m->is_static(), "for_virtual_call mismatch"); |
323 assert(!m->is_method_handle_invoke(), "should be a direct call"); | 327 assert(!m->is_method_handle_intrinsic(), "should be a direct call"); |
324 return new VirtualCallGenerator(m, vtable_index); | 328 return new VirtualCallGenerator(m, vtable_index); |
325 } | 329 } |
326 | 330 |
327 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { | 331 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { |
328 assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch"); | 332 assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch"); |
333 //@@ FIXME: this should be done via a direct call | |
329 return new DynamicCallGenerator(m); | 334 return new DynamicCallGenerator(m); |
330 } | 335 } |
331 | 336 |
332 // Allow inlining decisions to be delayed | 337 // Allow inlining decisions to be delayed |
333 class LateInlineCallGenerator : public DirectCallGenerator { | 338 class LateInlineCallGenerator : public DirectCallGenerator { |
647 } | 652 } |
648 return kit.transfer_exceptions_into_jvms(); | 653 return kit.transfer_exceptions_into_jvms(); |
649 } | 654 } |
650 | 655 |
651 | 656 |
652 //------------------------PredictedDynamicCallGenerator----------------------- | 657 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) { |
653 // Internal class which handles all out-of-line calls checking receiver type. | 658 assert(callee->is_method_handle_intrinsic() || |
654 class PredictedDynamicCallGenerator : public CallGenerator { | 659 callee->is_compiled_lambda_form(), "for_method_handle_call mismatch"); |
655 ciMethodHandle* _predicted_method_handle; | 660 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee); |
656 CallGenerator* _if_missed; | |
657 CallGenerator* _if_hit; | |
658 float _hit_prob; | |
659 | |
660 public: | |
661 PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle, | |
662 CallGenerator* if_missed, | |
663 CallGenerator* if_hit, | |
664 float hit_prob) | |
665 : CallGenerator(if_missed->method()), | |
666 _predicted_method_handle(predicted_method_handle), | |
667 _if_missed(if_missed), | |
668 _if_hit(if_hit), | |
669 _hit_prob(hit_prob) | |
670 {} | |
671 | |
672 virtual bool is_inline() const { return _if_hit->is_inline(); } | |
673 virtual bool is_deferred() const { return _if_hit->is_deferred(); } | |
674 | |
675 virtual JVMState* generate(JVMState* jvms); | |
676 }; | |
677 | |
678 | |
679 CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle, | |
680 CallGenerator* if_missed, | |
681 CallGenerator* if_hit, | |
682 float hit_prob) { | |
683 return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob); | |
684 } | |
685 | |
686 | |
687 CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms, | |
688 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
689 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch"); | |
690 CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile); | |
691 if (cg != NULL) | 661 if (cg != NULL) |
692 return cg; | 662 return cg; |
693 return CallGenerator::for_direct_call(callee); | 663 return CallGenerator::for_direct_call(callee); |
694 } | 664 } |
695 | 665 |
696 CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms, | 666 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) { |
697 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
698 if (method_handle->Opcode() == Op_ConP) { | |
699 const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr(); | |
700 ciObject* const_oop = oop_ptr->const_oop(); | |
701 ciMethodHandle* method_handle = const_oop->as_method_handle(); | |
702 | |
703 // Set the callee to have access to the class and signature in | |
704 // the MethodHandleCompiler. | |
705 method_handle->set_callee(callee); | |
706 method_handle->set_caller(caller); | |
707 method_handle->set_call_profile(profile); | |
708 | |
709 // Get an adapter for the MethodHandle. | |
710 ciMethod* target_method = method_handle->get_method_handle_adapter(); | |
711 if (target_method != NULL) { | |
712 CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); | |
713 if (cg != NULL && cg->is_inline()) | |
714 return cg; | |
715 } | |
716 } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 && | |
717 method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) { | |
718 float prob = PROB_FAIR; | |
719 Node* meth_region = method_handle->in(0); | |
720 if (meth_region->is_Region() && | |
721 meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() && | |
722 meth_region->in(1)->in(0) == meth_region->in(2)->in(0) && | |
723 meth_region->in(1)->in(0)->is_If()) { | |
724 // If diamond, so grab the probability of the test to drive the inlining below | |
725 prob = meth_region->in(1)->in(0)->as_If()->_prob; | |
726 if (meth_region->in(1)->is_IfTrue()) { | |
727 prob = 1 - prob; | |
728 } | |
729 } | |
730 | |
731 // selectAlternative idiom merging two constant MethodHandles. | |
732 // Generate a guard so that each can be inlined. We might want to | |
733 // do more inputs at later point but this gets the most common | |
734 // case. | |
735 CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob)); | |
736 CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob)); | |
737 if (cg1 != NULL && cg2 != NULL) { | |
738 const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr(); | |
739 ciObject* const_oop = oop_ptr->const_oop(); | |
740 ciMethodHandle* mh = const_oop->as_method_handle(); | |
741 return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob); | |
742 } | |
743 } | |
744 return NULL; | |
745 } | |
746 | |
747 CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
748 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch"); | |
749 // Get the CallSite object. | |
750 ciBytecodeStream str(caller); | |
751 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. | |
752 ciCallSite* call_site = str.get_call_site(); | |
753 CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile); | |
754 if (cg != NULL) | |
755 return cg; | |
756 return CallGenerator::for_dynamic_call(callee); | |
757 } | |
758 | |
759 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, | |
760 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
761 ciMethodHandle* method_handle = call_site->get_target(); | |
762 | |
763 // Set the callee to have access to the class and signature in the | |
764 // MethodHandleCompiler. | |
765 method_handle->set_callee(callee); | |
766 method_handle->set_caller(caller); | |
767 method_handle->set_call_profile(profile); | |
768 | |
769 // Get an adapter for the MethodHandle. | |
770 ciMethod* target_method = method_handle->get_invokedynamic_adapter(); | |
771 if (target_method != NULL) { | |
772 Compile *C = Compile::current(); | |
773 CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); | |
774 if (cg != NULL && cg->is_inline()) { | |
775 // Add a dependence for invalidation of the optimization. | |
776 if (!call_site->is_constant_call_site()) { | |
777 C->dependencies()->assert_call_site_target_value(call_site, method_handle); | |
778 } | |
779 return cg; | |
780 } | |
781 } | |
782 return NULL; | |
783 } | |
784 | |
785 | |
786 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) { | |
787 GraphKit kit(jvms); | 667 GraphKit kit(jvms); |
788 PhaseGVN& gvn = kit.gvn(); | 668 PhaseGVN& gvn = kit.gvn(); |
789 | 669 Compile* C = kit.C; |
790 CompileLog* log = kit.C->log(); | 670 vmIntrinsics::ID iid = callee->intrinsic_id(); |
791 if (log != NULL) { | 671 switch (iid) { |
792 log->elem("predicted_dynamic_call bci='%d'", jvms->bci()); | 672 case vmIntrinsics::_invokeBasic: |
793 } | 673 { |
794 | 674 // get MethodHandle receiver |
795 const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true); | 675 Node* receiver = kit.argument(0); |
796 Node* predicted_mh = kit.makecon(predicted_mh_ptr); | 676 if (receiver->Opcode() == Op_ConP) { |
797 | 677 const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr(); |
798 Node* bol = NULL; | 678 ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget(); |
799 int bc = jvms->method()->java_code_at_bci(jvms->bci()); | 679 guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove |
800 if (bc != Bytecodes::_invokedynamic) { | 680 const int vtable_index = methodOopDesc::invalid_vtable_index; |
801 // This is the selectAlternative idiom for guardWithTest or | 681 CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS); |
802 // similar idioms. | 682 if (cg != NULL && cg->is_inline()) |
803 Node* receiver = kit.argument(0); | 683 return cg; |
804 | 684 } else { |
805 // Check if the MethodHandle is the expected one | 685 if (PrintInlining) CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant"); |
806 Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(receiver, predicted_mh)); | 686 } |
807 bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) ); | 687 } |
808 } else { | 688 break; |
809 // Get the constant pool cache from the caller class. | 689 |
810 ciMethod* caller_method = jvms->method(); | 690 case vmIntrinsics::_linkToVirtual: |
811 ciBytecodeStream str(caller_method); | 691 case vmIntrinsics::_linkToStatic: |
812 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. | 692 case vmIntrinsics::_linkToSpecial: |
813 ciCPCache* cpcache = str.get_cpcache(); | 693 case vmIntrinsics::_linkToInterface: |
814 | 694 { |
815 // Get the offset of the CallSite from the constant pool cache | 695 // pop MemberName argument |
816 // pointer. | 696 Node* member_name = kit.argument(callee->arg_size() - 1); |
817 int index = str.get_method_index(); | 697 if (member_name->Opcode() == Op_ConP) { |
818 size_t call_site_offset = cpcache->get_f1_offset(index); | 698 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr(); |
819 | 699 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget(); |
820 // Load the CallSite object from the constant pool cache. | 700 |
821 const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache); | 701 // In lambda forms we erase signature types to avoid resolving issues |
822 Node* cpcache_adr = kit.makecon(cpcache_ptr); | 702 // involving class loaders. When we optimize a method handle invoke |
823 Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset); | 703 // to a direct call we must cast the receiver and arguments to its |
824 Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); | 704 // actual types. |
825 | 705 ciSignature* signature = target->signature(); |
826 // Load the target MethodHandle from the CallSite object. | 706 const int receiver_skip = target->is_static() ? 0 : 1; |
827 Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); | 707 // Cast receiver to its type. |
828 Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT); | 708 if (!target->is_static()) { |
829 | 709 Node* arg = kit.argument(0); |
830 // Check if the MethodHandle is still the same. | 710 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); |
831 Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh)); | 711 const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); |
832 bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) ); | 712 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { |
833 } | 713 Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); |
834 IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN); | 714 kit.set_argument(0, cast_obj); |
835 kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff))); | 715 } |
836 Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff)); | 716 } |
837 | 717 // Cast reference arguments to its type. |
838 SafePointNode* slow_map = NULL; | 718 for (int i = 0; i < signature->count(); i++) { |
839 JVMState* slow_jvms; | 719 ciType* t = signature->type_at(i); |
840 { PreserveJVMState pjvms(&kit); | 720 if (t->is_klass()) { |
841 kit.set_control(slow_ctl); | 721 Node* arg = kit.argument(receiver_skip + i); |
842 if (!kit.stopped()) { | 722 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); |
843 slow_jvms = _if_missed->generate(kit.sync_jvms()); | 723 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); |
844 if (kit.failing()) | 724 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { |
845 return NULL; // might happen because of NodeCountInliningCutoff | 725 Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); |
846 assert(slow_jvms != NULL, "must be"); | 726 kit.set_argument(receiver_skip + i, cast_obj); |
847 kit.add_exception_states_from(slow_jvms); | 727 } |
848 kit.set_map(slow_jvms->map()); | 728 } |
849 if (!kit.stopped()) | 729 } |
850 slow_map = kit.stop(); | 730 const int vtable_index = methodOopDesc::invalid_vtable_index; |
851 } | 731 const bool call_is_virtual = target->is_abstract(); // FIXME workaround |
852 } | 732 CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS); |
853 | 733 if (cg != NULL && cg->is_inline()) |
854 if (kit.stopped()) { | 734 return cg; |
855 // Instance does not exactly match the desired type. | |
856 kit.set_jvms(slow_jvms); | 736 } |
857 return kit.transfer_exceptions_into_jvms(); | 737 break; |
858 } | 738 |
859 | 739 default: |
860 // Make the hot call: | 740 fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); |
861 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); | 741 break; |
862 if (new_jvms == NULL) { | 742 } |
863 // Inline failed, so make a direct call. | 743 return NULL; |
864 assert(_if_hit->is_inline(), "must have been a failed inline"); | |
865 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); | |
866 new_jvms = cg->generate(kit.sync_jvms()); | |
867 } | |
868 kit.add_exception_states_from(new_jvms); | |
869 kit.set_jvms(new_jvms); | |
870 | |
871 // Need to merge slow and fast? | |
872 if (slow_map == NULL) { | |
873 // The fast path is the only path remaining. | |
874 return kit.transfer_exceptions_into_jvms(); | |
875 } | |
876 | |
877 if (kit.stopped()) { | |
878 // Inlined method threw an exception, so it's just the slow path after all. | |
879 kit.set_jvms(slow_jvms); | |
880 return kit.transfer_exceptions_into_jvms(); | |
881 } | |
882 | |
883 // Finish the diamond. | |
884 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization | |
885 RegionNode* region = new (kit.C, 3) RegionNode(3); | |
886 region->init_req(1, kit.control()); | |
887 region->init_req(2, slow_map->control()); | |
888 kit.set_control(gvn.transform(region)); | |
889 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); | |
890 iophi->set_req(2, slow_map->i_o()); | |
891 kit.set_i_o(gvn.transform(iophi)); | |
892 kit.merge_memory(slow_map->merged_memory(), region, 2); | |
893 uint tos = kit.jvms()->stkoff() + kit.sp(); | |
894 uint limit = slow_map->req(); | |
895 for (uint i = TypeFunc::Parms; i < limit; i++) { | |
896 // Skip unused stack slots; fast forward to monoff(); | |
897 if (i == tos) { | |
898 i = kit.jvms()->monoff(); | |
899 if( i >= limit ) break; | |
900 } | |
901 Node* m = kit.map()->in(i); | |
902 Node* n = slow_map->in(i); | |
903 if (m != n) { | |
904 const Type* t = gvn.type(m)->meet(gvn.type(n)); | |
905 Node* phi = PhiNode::make(region, m, t); | |
906 phi->set_req(2, n); | |
907 kit.map()->set_req(i, gvn.transform(phi)); | |
908 } | |
909 } | |
910 return kit.transfer_exceptions_into_jvms(); | |
911 } | 744 } |
912 | 745 |
913 | 746 |
914 //-------------------------UncommonTrapCallGenerator----------------------------- | 747 //-------------------------UncommonTrapCallGenerator----------------------------- |
915 // Internal class which handles all out-of-line calls checking receiver type. | 748 // Internal class which handles all out-of-line calls checking receiver type. |