comparison src/share/vm/c1/c1_Runtime1.cpp @ 14726:92aa6797d639

Backed out merge changeset: b51e29501f30
Backed out merge revision to its first parent (8f483e200405)
author Doug Simon <doug.simon@oracle.com>
date Mon, 24 Mar 2014 21:30:43 +0100
parents b51e29501f30
children
comparison: 14719:0bdd0d157040 vs 14726:92aa6797d639
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -807,14 +807,15 @@
   // that caller_method() == caller_code->method()

   int bci = vfst.bci();
   Bytecodes::Code code = caller_method()->java_code_at(bci);

+#ifndef PRODUCT
   // this is used by assertions in the access_field_patching_id
   BasicType patch_field_type = T_ILLEGAL;
+#endif // PRODUCT
   bool deoptimize_for_volatile = false;
-  bool deoptimize_for_atomic = false;
   int patch_field_offset = -1;
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
   Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
@@ -836,28 +837,15 @@
     // deoptimized so that the code can be regenerated correctly.
     // This check is only needed for access_field_patching since this
     // is the path for patching field offsets. load_klass is only
     // used for patching references to oops which don't need special
     // handling in the volatile case.
-
     deoptimize_for_volatile = result.access_flags().is_volatile();

-    // If we are patching a field which should be atomic, then
-    // the generated code is not correct either, force deoptimizing.
-    // We need to only cover T_LONG and T_DOUBLE fields, as we can
-    // break access atomicity only for them.
-
-    // Strictly speaking, the deoptimization on 64-bit platforms
-    // is unnecessary, and T_LONG stores on 32-bit platforms need
-    // to be handled by special patching code when AlwaysAtomicAccesses
-    // becomes a product feature. At this point, we are still going
-    // for the deoptimization for consistency against volatile
-    // accesses.
-
+#ifndef PRODUCT
     patch_field_type = result.field_type();
-    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
-
+#endif
   } else if (load_klass_or_mirror_patch_id) {
     Klass* k = NULL;
     switch (code) {
       case Bytecodes::_putstatic:
       case Bytecodes::_getstatic:
@@ -928,23 +916,17 @@
     }
   } else {
    ShouldNotReachHere();
  }

-  if (deoptimize_for_volatile || deoptimize_for_atomic) {
-    // At compile time we assumed the field wasn't volatile/atomic but after
-    // loading it turns out it was volatile/atomic so we have to throw the
+  if (deoptimize_for_volatile) {
+    // At compile time we assumed the field wasn't volatile but after
+    // loading it turns out it was volatile so we have to throw the
     // compiled code out and let it be regenerated.
     if (TracePatching) {
-      if (deoptimize_for_volatile) {
-        tty->print_cr("Deoptimizing for patching volatile field reference");
-      }
-      if (deoptimize_for_atomic) {
-        tty->print_cr("Deoptimizing for patching atomic field reference");
-      }
+      tty->print_cr("Deoptimizing for patching volatile field reference");
     }
-
     // It's possible the nmethod was invalidated in the last
     // safepoint, but if it's still alive then make it not_entrant.
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     if (nm != NULL) {
       nm->make_not_entrant();
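
For context on what this backout removes: the heart of the reverted change was a deoptimization predicate for fields that must be accessed atomically. Below is a minimal, self-contained sketch of that predicate, not the HotSpot source; the BasicType enum here is a pared-down stand-in for HotSpot's, and always_atomic_accesses stands in for the -XX:+AlwaysAtomicAccesses flag. Only T_LONG and T_DOUBLE qualify because they are the only 64-bit Java primitives, hence the only ones whose accesses can tear into two 32-bit halves.

#include <cstdio>

// Pared-down stand-in for HotSpot's BasicType enum (illustration only).
enum BasicType { T_INT, T_FLOAT, T_LONG, T_DOUBLE, T_ILLEGAL };

// Sketch of the backed-out check: when atomic accesses are required,
// patching a 64-bit primitive field forces the caller to be deoptimized,
// because the compiled code was generated without atomicity guarantees.
static bool deoptimize_for_atomic(bool always_atomic_accesses,
                                  BasicType patch_field_type) {
  return always_atomic_accesses &&
         (patch_field_type == T_DOUBLE || patch_field_type == T_LONG);
}

int main() {
  printf("%d\n", deoptimize_for_atomic(true,  T_LONG));   // 1: long stores can tear
  printf("%d\n", deoptimize_for_atomic(true,  T_INT));    // 0: a 32-bit store is already atomic
  printf("%d\n", deoptimize_for_atomic(false, T_DOUBLE)); // 0: flag off, no deopt
  return 0;
}

As the removed comment itself notes, the deoptimization is strictly unnecessary on 64-bit platforms; it was done anyway for consistency with the volatile-field path, which is the path that survives this backout.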