Mercurial > hg > truffle
comparison src/share/vm/opto/callGenerator.cpp @ 6266:1d7922586cf6
7023639: JSR 292 method handle invocation needs a fast path for compiled code
6984705: JSR 292 method handle creation should not go through JNI
Summary: remove assembly code for JDK 7 chained method handles
Reviewed-by: jrose, twisti, kvn, mhaupt
Contributed-by: John Rose <john.r.rose@oracle.com>, Christian Thalinger <christian.thalinger@oracle.com>, Michael Haupt <michael.haupt@oracle.com>
author | twisti |
---|---|
date | Tue, 24 Jul 2012 10:51:00 -0700 |
parents | 765ee2d1674b |
children | 6c5b7a6becc8 |
comparison
equal
deleted
inserted
replaced
6241:aba91a731143 | 6266:1d7922586cf6 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
24 | 24 |
25 #include "precompiled.hpp" | 25 #include "precompiled.hpp" |
26 #include "ci/bcEscapeAnalyzer.hpp" | 26 #include "ci/bcEscapeAnalyzer.hpp" |
27 #include "ci/ciCallSite.hpp" | 27 #include "ci/ciCallSite.hpp" |
28 #include "ci/ciCPCache.hpp" | 28 #include "ci/ciCPCache.hpp" |
29 #include "ci/ciMemberName.hpp" | |
29 #include "ci/ciMethodHandle.hpp" | 30 #include "ci/ciMethodHandle.hpp" |
30 #include "classfile/javaClasses.hpp" | 31 #include "classfile/javaClasses.hpp" |
31 #include "compiler/compileLog.hpp" | 32 #include "compiler/compileLog.hpp" |
32 #include "opto/addnode.hpp" | 33 #include "opto/addnode.hpp" |
33 #include "opto/callGenerator.hpp" | 34 #include "opto/callGenerator.hpp" |
37 #include "opto/parse.hpp" | 38 #include "opto/parse.hpp" |
38 #include "opto/rootnode.hpp" | 39 #include "opto/rootnode.hpp" |
39 #include "opto/runtime.hpp" | 40 #include "opto/runtime.hpp" |
40 #include "opto/subnode.hpp" | 41 #include "opto/subnode.hpp" |
41 | 42 |
42 CallGenerator::CallGenerator(ciMethod* method) { | |
43 _method = method; | |
44 } | |
45 | 43 |
46 // Utility function. | 44 // Utility function. |
47 const TypeFunc* CallGenerator::tf() const { | 45 const TypeFunc* CallGenerator::tf() const { |
48 return TypeFunc::make(method()); | 46 return TypeFunc::make(method()); |
49 } | 47 } |
146 // And dump it back to the caller, decorated with any exceptions: | 144 // And dump it back to the caller, decorated with any exceptions: |
147 return kit.transfer_exceptions_into_jvms(); | 145 return kit.transfer_exceptions_into_jvms(); |
148 } | 146 } |
149 // Mark the call node as virtual, sort of: | 147 // Mark the call node as virtual, sort of: |
150 call->set_optimized_virtual(true); | 148 call->set_optimized_virtual(true); |
151 if (method()->is_method_handle_invoke()) { | 149 if (method()->is_method_handle_intrinsic() || |
150 method()->is_compiled_lambda_form()) { | |
152 call->set_method_handle_invoke(true); | 151 call->set_method_handle_invoke(true); |
153 } | 152 } |
154 } | 153 } |
155 kit.set_arguments_for_java_call(call); | 154 kit.set_arguments_for_java_call(call); |
156 kit.set_edges_for_java_call(call, false, _separate_io_proj); | 155 kit.set_edges_for_java_call(call, false, _separate_io_proj); |
323 return new DirectCallGenerator(m, separate_io_proj); | 322 return new DirectCallGenerator(m, separate_io_proj); |
324 } | 323 } |
325 | 324 |
326 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { | 325 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { |
327 assert(!m->is_static(), "for_virtual_call mismatch"); | 326 assert(!m->is_static(), "for_virtual_call mismatch"); |
328 assert(!m->is_method_handle_invoke(), "should be a direct call"); | 327 assert(!m->is_method_handle_intrinsic(), "should be a direct call"); |
329 return new VirtualCallGenerator(m, vtable_index); | 328 return new VirtualCallGenerator(m, vtable_index); |
330 } | 329 } |
331 | 330 |
332 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { | 331 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { |
333 assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch"); | 332 assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch"); |
333 //@@ FIXME: this should be done via a direct call | |
334 return new DynamicCallGenerator(m); | 334 return new DynamicCallGenerator(m); |
335 } | 335 } |
336 | 336 |
337 // Allow inlining decisions to be delayed | 337 // Allow inlining decisions to be delayed |
338 class LateInlineCallGenerator : public DirectCallGenerator { | 338 class LateInlineCallGenerator : public DirectCallGenerator { |
652 } | 652 } |
653 return kit.transfer_exceptions_into_jvms(); | 653 return kit.transfer_exceptions_into_jvms(); |
654 } | 654 } |
655 | 655 |
656 | 656 |
657 //------------------------PredictedDynamicCallGenerator----------------------- | 657 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) { |
658 // Internal class which handles all out-of-line calls checking receiver type. | 658 assert(callee->is_method_handle_intrinsic() || |
659 class PredictedDynamicCallGenerator : public CallGenerator { | 659 callee->is_compiled_lambda_form(), "for_method_handle_call mismatch"); |
660 ciMethodHandle* _predicted_method_handle; | 660 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee); |
661 CallGenerator* _if_missed; | |
662 CallGenerator* _if_hit; | |
663 float _hit_prob; | |
664 | |
665 public: | |
666 PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle, | |
667 CallGenerator* if_missed, | |
668 CallGenerator* if_hit, | |
669 float hit_prob) | |
670 : CallGenerator(if_missed->method()), | |
671 _predicted_method_handle(predicted_method_handle), | |
672 _if_missed(if_missed), | |
673 _if_hit(if_hit), | |
674 _hit_prob(hit_prob) | |
675 {} | |
676 | |
677 virtual bool is_inline() const { return _if_hit->is_inline(); } | |
678 virtual bool is_deferred() const { return _if_hit->is_deferred(); } | |
679 | |
680 virtual JVMState* generate(JVMState* jvms); | |
681 }; | |
682 | |
683 | |
684 CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle, | |
685 CallGenerator* if_missed, | |
686 CallGenerator* if_hit, | |
687 float hit_prob) { | |
688 return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob); | |
689 } | |
690 | |
691 | |
692 CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms, | |
693 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
694 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch"); | |
695 CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile); | |
696 if (cg != NULL) | 661 if (cg != NULL) |
697 return cg; | 662 return cg; |
698 return CallGenerator::for_direct_call(callee); | 663 return CallGenerator::for_direct_call(callee); |
699 } | 664 } |
700 | 665 |
701 CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms, | 666 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) { |
702 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | 667 GraphKit kit(jvms); |
703 if (method_handle->Opcode() == Op_ConP) { | 668 PhaseGVN& gvn = kit.gvn(); |
704 const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr(); | 669 Compile* C = kit.C; |
705 ciObject* const_oop = oop_ptr->const_oop(); | 670 vmIntrinsics::ID iid = callee->intrinsic_id(); |
706 ciMethodHandle* method_handle = const_oop->as_method_handle(); | 671 switch (iid) { |
707 | 672 case vmIntrinsics::_invokeBasic: |
708 // Set the callee to have access to the class and signature in | 673 { |
709 // the MethodHandleCompiler. | 674 // get MethodHandle receiver |
710 method_handle->set_callee(callee); | 675 Node* receiver = kit.argument(0); |
711 method_handle->set_caller(caller); | 676 if (receiver->Opcode() == Op_ConP) { |
712 method_handle->set_call_profile(profile); | 677 const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr(); |
713 | 678 ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget(); |
714 // Get an adapter for the MethodHandle. | 679 guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove |
715 ciMethod* target_method = method_handle->get_method_handle_adapter(); | 680 const int vtable_index = methodOopDesc::invalid_vtable_index; |
716 if (target_method != NULL) { | 681 CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS); |
717 CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); | 682 if (cg != NULL && cg->is_inline()) |
718 if (cg != NULL && cg->is_inline()) | 683 return cg; |
719 return cg; | 684 } else { |
720 } | 685 if (PrintInlining) CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant"); |
721 } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 && | |
722 method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) { | |
723 float prob = PROB_FAIR; | |
724 Node* meth_region = method_handle->in(0); | |
725 if (meth_region->is_Region() && | |
726 meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() && | |
727 meth_region->in(1)->in(0) == meth_region->in(2)->in(0) && | |
728 meth_region->in(1)->in(0)->is_If()) { | |
729 // This is a diamond, so grab the probability of the test to drive the inlining below | |
730 prob = meth_region->in(1)->in(0)->as_If()->_prob; | |
731 if (meth_region->in(1)->is_IfTrue()) { | |
732 prob = 1 - prob; | |
733 } | 686 } |
734 } | 687 } |
735 | 688 break; |
736 // selectAlternative idiom merging two constant MethodHandles. | 689 |
737 // Generate a guard so that each can be inlined. We might want to | 690 case vmIntrinsics::_linkToVirtual: |
738 // do more inputs at later point but this gets the most common | 691 case vmIntrinsics::_linkToStatic: |
739 // case. | 692 case vmIntrinsics::_linkToSpecial: |
740 CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob)); | 693 case vmIntrinsics::_linkToInterface: |
741 CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob)); | 694 { |
742 if (cg1 != NULL && cg2 != NULL) { | 695 // pop MemberName argument |
743 const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr(); | 696 Node* member_name = kit.argument(callee->arg_size() - 1); |
744 ciObject* const_oop = oop_ptr->const_oop(); | 697 if (member_name->Opcode() == Op_ConP) { |
745 ciMethodHandle* mh = const_oop->as_method_handle(); | 698 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr(); |
746 return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob); | 699 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget(); |
747 } | 700 |
701 // In lambda forms we erase signature types to avoid resolving issues | |
702 // involving class loaders. When we optimize a method handle invoke | |
703 // to a direct call we must cast the receiver and arguments to its | |
704 // actual types. | |
705 ciSignature* signature = target->signature(); | |
706 const int receiver_skip = target->is_static() ? 0 : 1; | |
707 // Cast receiver to its type. | |
708 if (!target->is_static()) { | |
709 Node* arg = kit.argument(0); | |
710 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); | |
711 const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); | |
712 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { | |
713 Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); | |
714 kit.set_argument(0, cast_obj); | |
715 } | |
716 } | |
717 // Cast reference arguments to their types. | |
718 for (int i = 0; i < signature->count(); i++) { | |
719 ciType* t = signature->type_at(i); | |
720 if (t->is_klass()) { | |
721 Node* arg = kit.argument(receiver_skip + i); | |
722 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); | |
723 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); | |
724 if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { | |
725 Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); | |
726 kit.set_argument(receiver_skip + i, cast_obj); | |
727 } | |
728 } | |
729 } | |
730 const int vtable_index = methodOopDesc::invalid_vtable_index; | |
731 const bool call_is_virtual = target->is_abstract(); // FIXME workaround | |
732 CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS); | |
733 if (cg != NULL && cg->is_inline()) | |
734 return cg; | |
735 } | |
736 } | |
737 break; | |
738 | |
739 default: | |
740 fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); | |
741 break; | |
748 } | 742 } |
749 return NULL; | 743 return NULL; |
750 } | |
751 | |
752 CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
753 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch"); | |
754 // Get the CallSite object. | |
755 ciBytecodeStream str(caller); | |
756 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. | |
757 ciCallSite* call_site = str.get_call_site(); | |
758 CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile); | |
759 if (cg != NULL) | |
760 return cg; | |
761 return CallGenerator::for_dynamic_call(callee); | |
762 } | |
763 | |
764 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, | |
765 ciMethod* caller, ciMethod* callee, ciCallProfile profile) { | |
766 ciMethodHandle* method_handle = call_site->get_target(); | |
767 | |
768 // Set the callee to have access to the class and signature in the | |
769 // MethodHandleCompiler. | |
770 method_handle->set_callee(callee); | |
771 method_handle->set_caller(caller); | |
772 method_handle->set_call_profile(profile); | |
773 | |
774 // Get an adapter for the MethodHandle. | |
775 ciMethod* target_method = method_handle->get_invokedynamic_adapter(); | |
776 if (target_method != NULL) { | |
777 Compile *C = Compile::current(); | |
778 CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); | |
779 if (cg != NULL && cg->is_inline()) { | |
780 // Add a dependence for invalidation of the optimization. | |
781 if (!call_site->is_constant_call_site()) { | |
782 C->dependencies()->assert_call_site_target_value(call_site, method_handle); | |
783 } | |
784 return cg; | |
785 } | |
786 } | |
787 return NULL; | |
788 } | |
789 | |
790 | |
791 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) { | |
792 GraphKit kit(jvms); | |
793 Compile* C = kit.C; | |
794 PhaseGVN& gvn = kit.gvn(); | |
795 | |
796 CompileLog* log = C->log(); | |
797 if (log != NULL) { | |
798 log->elem("predicted_dynamic_call bci='%d'", jvms->bci()); | |
799 } | |
800 | |
801 const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true); | |
802 Node* predicted_mh = kit.makecon(predicted_mh_ptr); | |
803 | |
804 Node* bol = NULL; | |
805 int bc = jvms->method()->java_code_at_bci(jvms->bci()); | |
806 if (bc != Bytecodes::_invokedynamic) { | |
807 // This is the selectAlternative idiom for guardWithTest or | |
808 // similar idioms. | |
809 Node* receiver = kit.argument(0); | |
810 | |
811 // Check if the MethodHandle is the expected one | |
812 Node* cmp = gvn.transform(new (C, 3) CmpPNode(receiver, predicted_mh)); | |
813 bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); | |
814 } else { | |
815 // Get the constant pool cache from the caller class. | |
816 ciMethod* caller_method = jvms->method(); | |
817 ciBytecodeStream str(caller_method); | |
818 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. | |
819 ciCPCache* cpcache = str.get_cpcache(); | |
820 | |
821 // Get the offset of the CallSite from the constant pool cache | |
822 // pointer. | |
823 int index = str.get_method_index(); | |
824 size_t call_site_offset = cpcache->get_f1_offset(index); | |
825 | |
826 // Load the CallSite object from the constant pool cache. | |
827 const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT | |
828 const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass()); | |
829 Node* cpcache_adr = kit.makecon(cpcache_type); | |
830 Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset); | |
831 // The oops in the constant pool cache are not compressed; load them as raw pointers. | |
832 Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw); | |
833 | |
834 // Load the target MethodHandle from the CallSite object. | |
835 const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass()); | |
836 Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); | |
837 Node* target_mh = kit.make_load(kit.control(), target_adr, target_type, T_OBJECT); | |
838 | |
839 // Check if the MethodHandle is still the same. | |
840 Node* cmp = gvn.transform(new (C, 3) CmpPNode(target_mh, predicted_mh)); | |
841 bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); | |
842 } | |
843 IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN); | |
844 kit.set_control( gvn.transform(new (C, 1) IfTrueNode (iff))); | |
845 Node* slow_ctl = gvn.transform(new (C, 1) IfFalseNode(iff)); | |
846 | |
847 SafePointNode* slow_map = NULL; | |
848 JVMState* slow_jvms; | |
849 { PreserveJVMState pjvms(&kit); | |
850 kit.set_control(slow_ctl); | |
851 if (!kit.stopped()) { | |
852 slow_jvms = _if_missed->generate(kit.sync_jvms()); | |
853 if (kit.failing()) | |
854 return NULL; // might happen because of NodeCountInliningCutoff | |
855 assert(slow_jvms != NULL, "must be"); | |
856 kit.add_exception_states_from(slow_jvms); | |
857 kit.set_map(slow_jvms->map()); | |
858 if (!kit.stopped()) | |
859 slow_map = kit.stop(); | |
860 } | |
861 } | |
862 | |
863 if (kit.stopped()) { | |
864 // Instance does not exactly match the desired type. | |
865 kit.set_jvms(slow_jvms); | |
866 return kit.transfer_exceptions_into_jvms(); | |
867 } | |
868 | |
869 // Make the hot call: | |
870 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); | |
871 if (new_jvms == NULL) { | |
872 // Inline failed, so make a direct call. | |
873 assert(_if_hit->is_inline(), "must have been a failed inline"); | |
874 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); | |
875 new_jvms = cg->generate(kit.sync_jvms()); | |
876 } | |
877 kit.add_exception_states_from(new_jvms); | |
878 kit.set_jvms(new_jvms); | |
879 | |
880 // Need to merge slow and fast? | |
881 if (slow_map == NULL) { | |
882 // The fast path is the only path remaining. | |
883 return kit.transfer_exceptions_into_jvms(); | |
884 } | |
885 | |
886 if (kit.stopped()) { | |
887 // Inlined method threw an exception, so it's just the slow path after all. | |
888 kit.set_jvms(slow_jvms); | |
889 return kit.transfer_exceptions_into_jvms(); | |
890 } | |
891 | |
892 // Finish the diamond. | |
893 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization | |
894 RegionNode* region = new (C, 3) RegionNode(3); | |
895 region->init_req(1, kit.control()); | |
896 region->init_req(2, slow_map->control()); | |
897 kit.set_control(gvn.transform(region)); | |
898 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); | |
899 iophi->set_req(2, slow_map->i_o()); | |
900 kit.set_i_o(gvn.transform(iophi)); | |
901 kit.merge_memory(slow_map->merged_memory(), region, 2); | |
902 uint tos = kit.jvms()->stkoff() + kit.sp(); | |
903 uint limit = slow_map->req(); | |
904 for (uint i = TypeFunc::Parms; i < limit; i++) { | |
905 // Skip unused stack slots; fast forward to monoff(); | |
906 if (i == tos) { | |
907 i = kit.jvms()->monoff(); | |
908 if( i >= limit ) break; | |
909 } | |
910 Node* m = kit.map()->in(i); | |
911 Node* n = slow_map->in(i); | |
912 if (m != n) { | |
913 const Type* t = gvn.type(m)->meet(gvn.type(n)); | |
914 Node* phi = PhiNode::make(region, m, t); | |
915 phi->set_req(2, n); | |
916 kit.map()->set_req(i, gvn.transform(phi)); | |
917 } | |
918 } | |
919 return kit.transfer_exceptions_into_jvms(); | |
920 } | 744 } |
921 | 745 |
922 | 746 |
923 //-------------------------UncommonTrapCallGenerator----------------------------- | 747 //-------------------------UncommonTrapCallGenerator----------------------------- |
924 // Internal class which handles all out-of-line calls checking receiver type. | 748 // Internal class which handles all out-of-line calls checking receiver type. |