comparison src/share/vm/interpreter/interpreterRuntime.cpp @ 1783:d5d065957597

6953144: Tiered compilation
Summary: Infrastructure for tiered compilation support (interpreter + c1 + c2) for 32 and 64 bit. Simple tiered policy implementation.
Reviewed-by: kvn, never, phh, twisti
author iveresov
date Fri, 03 Sep 2010 17:51:07 -0700
parents 3e8fbc61cee8
children d257356e35f0
comparison of 1782:f353275af40e and 1783:d5d065957597
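
The change below replaces the interpreter's hand-rolled counter-overflow handling with a single callback into the compilation policy. For orientation, here is the shape of the interface that the new call site implies; this is a sketch only, and the parameter names and the use of JavaThread* for the last argument are assumptions rather than the actual compilationPolicy.hpp declaration:

    // Sketch of the policy entry point implied by the call
    //   CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread)
    // in the right-hand side of the diff. Names and types here are assumptions.
    class CompilationPolicy {
     public:
      static CompilationPolicy* policy();   // the globally selected policy object
      // Called for both method-entry and backedge counter overflows. For a
      // method-entry overflow both bci arguments are InvocationEntryBci (-1);
      // for a backedge overflow they are real bytecode indices. A non-NULL
      // result is an OSR nmethod the interpreter should transfer into.
      virtual nmethod* event(methodHandle method, methodHandle inlinee,
                             int branch_bci, int bci,
                             CompLevel comp_level, JavaThread* thread) = 0;
    };
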
@@ -775,47 +775,10 @@
 
 //------------------------------------------------------------------------------------------------------------------------
 // Miscellaneous
 
 
-#ifndef PRODUCT
-static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
-  if (TraceInvocationCounterOverflow) {
-    InvocationCounter* ic = m->invocation_counter();
-    InvocationCounter* bc = m->backedge_counter();
-    ResourceMark rm;
-    const char* msg =
-      branch_bcp == NULL
-      ? "comp-policy cntr ovfl @ %d in entry of "
-      : "comp-policy cntr ovfl @ %d in loop of ";
-    tty->print(msg, bci);
-    m->print_value();
-    tty->cr();
-    ic->print();
-    bc->print();
-    if (ProfileInterpreter) {
-      if (branch_bcp != NULL) {
-        methodDataOop mdo = m->method_data();
-        if (mdo != NULL) {
-          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
-          tty->print_cr("back branch count = %d", count);
-        }
-      }
-    }
-  }
-}
-
-static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
-  if (TraceOnStackReplacement) {
-    ResourceMark rm;
-    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
-    method->print_short_name(tty);
-    tty->print_cr(" at bci %d", bci);
-  }
-}
-#endif // !PRODUCT
-
 nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
   nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
   assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
   if (branch_bcp != NULL && nm != NULL) {
     // This was a successful request for an OSR nmethod. Because
@@ -824,11 +787,11 @@
     // to examine nm directly since it might have been freed and used
     // for something else.
     frame fr = thread->last_frame();
     methodOop method = fr.interpreter_frame_method();
     int bci = method->bci_from(fr.interpreter_frame_bcp());
-    nm = method->lookup_osr_nmethod_for(bci);
+    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
   return nm;
 }
 
 IRT_ENTRY(nmethod*,
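
The only functional change in the hunk above is the widened OSR lookup. A hedged reading of the extra arguments (the parameter names are guesses, not the actual methodOopDesc declaration): the second argument is the compilation level the caller is interested in and the third says whether that level must match exactly, so this call asks for any existing OSR nmethod at the bci regardless of which tier produced it:

    // Assumed signature: nmethod* lookup_osr_nmethod_for(int bci, int comp_level, bool match_level);
    // CompLevel_none with match_level == false, i.e. "best available OSR nmethod
    // at this bci, from any tier".
    nmethod* nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
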
@@ -838,78 +801,36 @@
   UnlockFlagSaver fs(thread);
 
   frame fr = thread->last_frame();
   assert(fr.is_interpreted_frame(), "must come from interpreter");
   methodHandle method(thread, fr.interpreter_frame_method());
-  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
-  const int bci = method->bci_from(fr.interpreter_frame_bcp());
-  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
-
-  if (JvmtiExport::can_post_interpreter_events()) {
-    if (thread->is_interp_only_mode()) {
-      // If certain JVMTI events (e.g. frame pop event) are requested then the
-      // thread is forced to remain in interpreted code. This is
-      // implemented partly by a check in the run_compiled_code
-      // section of the interpreter whether we should skip running
-      // compiled code, and partly by skipping OSR compiles for
-      // interpreted-only threads.
-      if (branch_bcp != NULL) {
-        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-        return NULL;
-      }
-    }
-  }
-
-  if (branch_bcp == NULL) {
-    // when code cache is full, compilation gets switched off, UseCompiler
-    // is set to false
-    if (!method->has_compiled_code() && UseCompiler) {
-      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
-    } else {
-      // Force counter overflow on method entry, even if no compilation
-      // happened. (The method_invocation_event call does this also.)
-      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
-    }
-    // compilation at an invocation overflow no longer goes and retries test for
-    // compiled method. We always run the loser of the race as interpreted.
-    // so return NULL
-    return NULL;
-  } else {
-    // counter overflow in a loop => try to do on-stack-replacement
-    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
-    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
-    // when code cache is full, we should not compile any more...
-    if (osr_nm == NULL && UseCompiler) {
-      const int branch_bci = method->bci_from(branch_bcp);
-      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
-      osr_nm = method->lookup_osr_nmethod_for(bci);
-    }
-    if (osr_nm == NULL) {
-      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-      return NULL;
-    } else {
-      // We may need to do on-stack replacement which requires that no
-      // monitors in the activation are biased because their
-      // BasicObjectLocks will need to migrate during OSR. Force
-      // unbiasing of all monitors in the activation now (even though
-      // the OSR nmethod might be invalidated) because we don't have a
-      // safepoint opportunity later once the migration begins.
-      if (UseBiasedLocking) {
-        ResourceMark rm;
-        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
-             kptr < fr.interpreter_frame_monitor_begin();
-             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
-          if( kptr->obj() != NULL ) {
-            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
-          }
-        }
-        BiasedLocking::revoke(objects_to_revoke);
-      }
-      return osr_nm;
-    }
-  }
+  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
+  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
+
+  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
+
+  if (osr_nm != NULL) {
+    // We may need to do on-stack replacement which requires that no
+    // monitors in the activation are biased because their
+    // BasicObjectLocks will need to migrate during OSR. Force
+    // unbiasing of all monitors in the activation now (even though
+    // the OSR nmethod might be invalidated) because we don't have a
+    // safepoint opportunity later once the migration begins.
+    if (UseBiasedLocking) {
+      ResourceMark rm;
+      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+      for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
+           kptr < fr.interpreter_frame_monitor_begin();
+           kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+        if( kptr->obj() != NULL ) {
+          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
+        }
+      }
+      BiasedLocking::revoke(objects_to_revoke);
+    }
+  }
+  return osr_nm;
 IRT_END
 
 IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
   assert(ProfileInterpreter, "must be profiling interpreter");
   int bci = method->bci_from(cur_bcp);
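
A consequence of the new bci handling in the last hunk: a method-entry overflow now reports branch_bci == bci == InvocationEntryBci, while a backedge overflow passes real bytecode indices, so a policy implementation can distinguish the two cases without the old branch_bcp == NULL checks. A minimal, purely illustrative policy-side test (hypothetical helper, not part of this changeset):

    // Hypothetical helper, for illustration only.
    static bool is_backedge_event(int bci) {
      // InvocationEntryBci is -1 in HotSpot, i.e. "not a bytecode index": it marks
      // a method-entry overflow. Any real bci means the overflow came from a
      // backedge and may result in an OSR nmethod.
      return bci != InvocationEntryBci;
    }
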