comparison src/share/vm/c1/c1_Runtime1.cpp @ 12355:cefad50507d8

Merge with hs25-b53
author Gilles Duboscq <duboscq@ssw.jku.at>
date Fri, 11 Oct 2013 10:38:03 +0200
parents 6b0fd0964b87 d0cfa6502dfe
children 096c224171c4
comparing 12058:ccb4f2af2319 (old) with 12355:cefad50507d8 (new)
@@ -707,14 +707,14 @@
   Bytecode_field field_access(caller, bci);
   // This can be static or non-static field access
   Bytecodes::Code code = field_access.code();
 
   // We must load class, initialize class and resolvethe field
-  FieldAccessInfo result; // initialize class if needed
+  fieldDescriptor result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
-  return result.klass()();
+  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+  return result.field_holder();
 }
 
 
 //
 // This routine patches sites where a class wasn't loaded or
@@ -817,21 +817,22 @@
   bool deoptimize_for_volatile = false;
   int patch_field_offset = -1;
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 
   if (stub_id == Runtime1::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
-    FieldAccessInfo result; // initialize class if needed
+    fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
-    patch_field_offset = result.field_offset();
+    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+    patch_field_offset = result.offset();
 
     // If we're patching a field which is volatile then at compile it
     // must not have been know to be volatile, so the generated code
     // isn't correct for a volatile reference. The nmethod has to be
     // deoptimized so that the code can be regenerated correctly.
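The hunk above (together with the matching change at lines 707-715) switches the C1 field-patching path from LinkResolver::resolve_field filling a FieldAccessInfo to LinkResolver::resolve_field_access filling a fieldDescriptor, so callers now read the holder and offset through field_holder() and offset() instead of klass()() and field_offset(). The stand-alone sketch below only illustrates the shape of that accessor change; MockKlass, MockFieldDescriptor and the resolve_field_access stub are hypothetical stand-ins written for this note, not HotSpot's real classes.

    // Illustrative sketch only: the Mock* types below are hypothetical stand-ins,
    // not HotSpot classes; they mirror the out-parameter + accessor shape used
    // after this change (result.field_holder() / result.offset()).
    #include <cstdio>

    struct MockKlass {
      const char* name;
    };

    struct MockFieldDescriptor {            // shaped like the new fieldDescriptor result
      MockKlass* holder;
      int        byte_offset;
      MockKlass* field_holder() const { return holder; }
      int        offset()       const { return byte_offset; }
    };

    // Stand-in for the resolver call: it fills the caller-supplied descriptor,
    // analogous to LinkResolver::resolve_field_access(result, ...) in the hunk above.
    static void resolve_field_access(MockFieldDescriptor& result, MockKlass* holder, int offset) {
      result.holder      = holder;
      result.byte_offset = offset;
    }

    int main() {
      MockKlass object_klass = { "java/lang/Object" };
      MockFieldDescriptor result;            // "initialize class if needed" in the real code
      resolve_field_access(result, &object_klass, 12);
      // Old call sites used result.klass()() and result.field_offset();
      // new call sites read the same information like this:
      std::printf("holder=%s offset=%d\n", result.field_holder()->name, result.offset());
      return 0;
    }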
@@ -886,14 +887,36 @@
           Bytecode_loadconstant cc(caller_method, bci);
           oop m = cc.resolve_constant(CHECK);
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
+  } else if (stub_id == load_appendix_patching_id) {
+    Bytecode_invoke bytecode(caller_method, bci);
+    Bytecodes::Code bc = bytecode.invoke_code();
+
+    CallInfo info;
+    constantPoolHandle pool(thread, caller_method->constants());
+    int index = bytecode.index();
+    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+    appendix = info.resolved_appendix();
+    switch (bc) {
+      case Bytecodes::_invokehandle: {
+        int cache_index = ConstantPool::decode_cpcache_index(index, true);
+        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+        break;
+      }
+      case Bytecodes::_invokedynamic: {
+        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+        break;
+      }
+      default: fatal("unexpected bytecode for load_appendix_patching_id");
+    }
   } else {
     ShouldNotReachHere();
   }
 
   if (deoptimize_for_volatile) {
@@ -911,20 +934,10 @@
     }
 
     Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
 
     // Return to the now deoptimized frame.
-  }
-
-  // If we are patching in a non-perm oop, make sure the nmethod
-  // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
-    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-    if (!nm->on_scavenge_root_list())
-      CodeCache::add_scavenge_root_nmethod(nm);
   }
 
   // Now copy code back
 
   {
@@ -1000,69 +1013,84 @@
 
         assert(n_copy->data() == 0 ||
                n_copy->data() == (intptr_t)Universe::non_oop_word(),
                "illegal init value");
         if (stub_id == Runtime1::load_klass_patching_id) {
           assert(load_klass() != NULL, "klass not set");
           n_copy->set_data((intx) (load_klass()));
         } else {
           assert(mirror() != NULL, "klass not set");
-          n_copy->set_data((intx) (mirror()));
+          n_copy->set_data(cast_from_oop<intx>(mirror()));
         }
 
         if (TracePatching) {
           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
         }
-
-#if defined(SPARC) || defined(PPC)
-        // Update the location in the nmethod with the proper
-        // metadata. When the code was generated, a NULL was stuffed
-        // in the metadata table and that table needs to be update to
-        // have the right value. On intel the value is kept
-        // directly in the instruction instead of in the metadata
-        // table, so set_data above effectively updated the value.
-        nmethod* nm = CodeCache::find_nmethod(instr_pc);
-        assert(nm != NULL, "invalid nmethod_pc");
-        RelocIterator mds(nm, copy_buff, copy_buff + 1);
-        bool found = false;
-        while (mds.next() && !found) {
-          if (mds.type() == relocInfo::oop_type) {
-            assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-            oop_Relocation* r = mds.oop_reloc();
-            oop* oop_adr = r->oop_addr();
-            *oop_adr = mirror();
-            r->fix_oop_relocation();
-            found = true;
-          } else if (mds.type() == relocInfo::metadata_type) {
-            assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-            metadata_Relocation* r = mds.metadata_reloc();
-            Metadata** metadata_adr = r->metadata_addr();
-            *metadata_adr = load_klass();
-            r->fix_metadata_relocation();
-            found = true;
-          }
-        }
-        assert(found, "the metadata must exist!");
-#endif
-
+      }
+    } else if (stub_id == Runtime1::load_appendix_patching_id) {
+      NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+      assert(n_copy->data() == 0 ||
+             n_copy->data() == (intptr_t)Universe::non_oop_word(),
+             "illegal init value");
+      n_copy->set_data(cast_from_oop<intx>(appendix()));
+
+      if (TracePatching) {
+        Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }
     } else {
       ShouldNotReachHere();
     }
 
+#if defined(SPARC) || defined(PPC)
+    if (load_klass_or_mirror_patch_id ||
+        stub_id == Runtime1::load_appendix_patching_id) {
+      // Update the location in the nmethod with the proper
+      // metadata. When the code was generated, a NULL was stuffed
+      // in the metadata table and that table needs to be update to
+      // have the right value. On intel the value is kept
+      // directly in the instruction instead of in the metadata
+      // table, so set_data above effectively updated the value.
+      nmethod* nm = CodeCache::find_nmethod(instr_pc);
+      assert(nm != NULL, "invalid nmethod_pc");
+      RelocIterator mds(nm, copy_buff, copy_buff + 1);
+      bool found = false;
+      while (mds.next() && !found) {
+        if (mds.type() == relocInfo::oop_type) {
+          assert(stub_id == Runtime1::load_mirror_patching_id ||
+                 stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+          oop_Relocation* r = mds.oop_reloc();
+          oop* oop_adr = r->oop_addr();
+          *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+          r->fix_oop_relocation();
+          found = true;
+        } else if (mds.type() == relocInfo::metadata_type) {
+          assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+          metadata_Relocation* r = mds.metadata_reloc();
+          Metadata** metadata_adr = r->metadata_addr();
+          *metadata_adr = load_klass();
+          r->fix_metadata_relocation();
+          found = true;
+        }
+      }
+      assert(found, "the metadata must exist!");
+    }
+#endif
     if (do_patch) {
       // replace instructions
       // first replace the tail, then the call
 #ifdef ARM
-      if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
+      if((load_klass_or_mirror_patch_id ||
+          stub_id == Runtime1::load_appendix_patching_id) &&
+         !VM_Version::supports_movw()) {
         nmethod* nm = CodeCache::find_nmethod(instr_pc);
         address addr = NULL;
         assert(nm != NULL, "invalid nmethod_pc");
         RelocIterator mds(nm, copy_buff, copy_buff + 1);
         while (mds.next()) {
           if (mds.type() == relocInfo::oop_type) {
-            assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
+            assert(stub_id == Runtime1::load_mirror_patching_id ||
+                   stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
             oop_Relocation* r = mds.oop_reloc();
             addr = (address)r->oop_addr();
             break;
           } else if (mds.type() == relocInfo::metadata_type) {
             assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
@@ -1085,11 +1113,12 @@
           *(unsigned char*)dst = (unsigned char) a_byte;
         }
         ICache::invalidate_range(instr_pc, *byte_count);
         NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-        if (load_klass_or_mirror_patch_id) {
+        if (load_klass_or_mirror_patch_id ||
+            stub_id == Runtime1::load_appendix_patching_id) {
           relocInfo::relocType rtype =
             (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
                                    relocInfo::oop_type;
           // update relocInfo to metadata
@@ -1123,10 +1152,26 @@
           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
         }
       }
     }
   }
+
+  // If we are patching in a non-perm oop, make sure the nmethod
+  // is on the right list.
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
+    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+    if (!nm->on_scavenge_root_list()) {
+      CodeCache::add_scavenge_root_nmethod(nm);
+    }
+
+    // Since we've patched some oops in the nmethod,
+    // (re)register it with the heap.
+    Universe::heap()->register_nmethod(nm);
+  }
 JRT_END
 
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
@@ -1172,10 +1217,28 @@
   // Return true if calling code is deoptimized
 
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
 // know after the patching is complete and any safepoint(s) are taken
 // if the calling nmethod was deoptimized. We do this by calling a