comparison src/share/vm/c1/c1_Runtime1.cpp @ 12160:f98f5d48f511

7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
Summary: Do patching rather than bailing out for unlinked call with appendix
Reviewed-by: twisti, kvn
author roland
date Wed, 21 Aug 2013 13:34:45 +0200
parents 5888334c9c24
children b2e698d2276c d68894a09c7c
comparing 12159:b17d8f6d9ed7 with 12160:f98f5d48f511
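
Background: JSR 292 call sites (invokedynamic and invokehandle) can carry an "appendix" — roughly, an extra constant such as a MethodType or CallSite-derived object that the JVM appends as a hidden trailing argument once the call site is resolved. Before this change, C1 had to bail out when it compiled such a call before linking, because the appendix value was not yet known. The changeset instead adds a load_appendix_patching stub: the compiled code loads a sentinel constant, traps into the runtime on first execution, resolves the call, and patches the real appendix oop into place. The sketch below models that patch-on-first-use shape in miniature; every name and type in it is an illustrative stand-in, not HotSpot's real API.

#include <cassert>
#include <cstdint>

// Stand-in for an ordinary object pointer; not HotSpot's oop.
struct Oop { intptr_t value; };

// Sentinel meaning "constant not patched yet"; HotSpot uses
// Universe::non_oop_word() for the same purpose.
static const intptr_t kNonOopWord = -1;

// Models the patchable constant baked into the compiled appendix load.
struct AppendixLoadSite {
  intptr_t data;  // immediate operand of the move instruction
  bool is_linked() const { return data != 0 && data != kNonOopWord; }
};

// What the new load_appendix_patching path does, in miniature: the first
// execution traps into the runtime, which resolves the call site and then
// rewrites the instruction's constant with the appendix oop.
void patch_appendix(AppendixLoadSite* site, Oop appendix) {
  assert(!site->is_linked() && "illegal init value");
  site->data = appendix.value;  // corresponds to n_copy->set_data(...)
}

int main() {
  AppendixLoadSite site = { kNonOopWord };
  Oop resolved = { 0x1000 };  // pretend this came from LinkResolver
  patch_appendix(&site, resolved);
  assert(site.is_linked());
  return 0;
}

The changeset itself follows, rendered here as a unified diff.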
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -817,10 +817,11 @@
   bool deoptimize_for_volatile = false;
   int patch_field_offset = -1;
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 
   if (stub_id == Runtime1::access_field_patching_id) {
 
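For orientation, patch_code now dispatches on four kinds of patchable constant. Paraphrased below as a reading aid (the authoritative StubID list lives in c1_Runtime1.hpp; these names drop the _id suffix):

// Paraphrase of the Runtime1 patching stub ids this function dispatches on.
enum PatchStubKind {
  access_field_patching,  // field offset unknown at compile time
  load_klass_patching,    // Klass* constant not yet resolved
  load_mirror_patching,   // java.lang.Class mirror oop not yet resolved
  load_appendix_patching  // invokedynamic/invokehandle appendix oop (added here)
};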
@@ -886,14 +887,36 @@
           Bytecode_loadconstant cc(caller_method, bci);
           oop m = cc.resolve_constant(CHECK);
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
+  } else if (stub_id == load_appendix_patching_id) {
+    Bytecode_invoke bytecode(caller_method, bci);
+    Bytecodes::Code bc = bytecode.invoke_code();
+
+    CallInfo info;
+    constantPoolHandle pool(thread, caller_method->constants());
+    int index = bytecode.index();
+    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+    appendix = info.resolved_appendix();
+    switch (bc) {
+    case Bytecodes::_invokehandle: {
+      int cache_index = ConstantPool::decode_cpcache_index(index, true);
+      assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+      pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+      break;
+    }
+    case Bytecodes::_invokedynamic: {
+      pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+      break;
+    }
+    default: fatal("unexpected bytecode for load_appendix_patching_id");
+    }
   } else {
     ShouldNotReachHere();
   }
 
   if (deoptimize_for_volatile) {
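The new branch above resolves the call eagerly from within the runtime, then records the result in the constant-pool cache: set_method_handle for invokehandle, set_dynamic_call for invokedynamic. A compilable miniature of that split is below; the cache, entry, and decode function are stand-ins (the real ConstantPool::decode_cpcache_index encoding is more involved), so only the control flow carries over.

#include <cassert>
#include <vector>

enum Bytecode { invokehandle, invokedynamic };

struct CacheEntry {
  bool linked = false;
  void set_method_handle() { linked = true; }  // models set_method_handle(pool, info)
  void set_dynamic_call()  { linked = true; }  // models set_dynamic_call(pool, info)
};

struct Cache {
  std::vector<CacheEntry> entries;
  int length() { return (int)entries.size(); }
  CacheEntry* entry_at(int i) { return &entries[i]; }
};

// Hypothetical identity decoding; the real function undoes the operand
// encoding used by rewritten bytecode before indexing the cache.
int decode_cpcache_index(int raw_index) { return raw_index; }

void record_linked_call(Cache& cache, Bytecode bc, int index) {
  if (bc == invokehandle) {
    int cache_index = decode_cpcache_index(index);
    assert(cache_index >= 0 && cache_index < cache.length());
    cache.entry_at(cache_index)->set_method_handle();
  } else {
    // invokedynamic selects its cache entry through a different scheme
    cache.entry_at(index)->set_dynamic_call();
  }
}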
@@ -990,57 +1013,69 @@
 
       assert(n_copy->data() == 0 ||
              n_copy->data() == (intptr_t)Universe::non_oop_word(),
              "illegal init value");
       if (stub_id == Runtime1::load_klass_patching_id) {
         assert(load_klass() != NULL, "klass not set");
         n_copy->set_data((intx) (load_klass()));
       } else {
         assert(mirror() != NULL, "klass not set");
         n_copy->set_data((intx) (mirror()));
       }
 
       if (TracePatching) {
         Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }
-
-#if defined(SPARC) || defined(PPC)
-      // Update the location in the nmethod with the proper
-      // metadata. When the code was generated, a NULL was stuffed
-      // in the metadata table and that table needs to be update to
-      // have the right value. On intel the value is kept
-      // directly in the instruction instead of in the metadata
-      // table, so set_data above effectively updated the value.
-      nmethod* nm = CodeCache::find_nmethod(instr_pc);
-      assert(nm != NULL, "invalid nmethod_pc");
-      RelocIterator mds(nm, copy_buff, copy_buff + 1);
-      bool found = false;
-      while (mds.next() && !found) {
-        if (mds.type() == relocInfo::oop_type) {
-          assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-          oop_Relocation* r = mds.oop_reloc();
-          oop* oop_adr = r->oop_addr();
-          *oop_adr = mirror();
-          r->fix_oop_relocation();
-          found = true;
-        } else if (mds.type() == relocInfo::metadata_type) {
-          assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-          metadata_Relocation* r = mds.metadata_reloc();
-          Metadata** metadata_adr = r->metadata_addr();
-          *metadata_adr = load_klass();
-          r->fix_metadata_relocation();
-          found = true;
-        }
-      }
-      assert(found, "the metadata must exist!");
-#endif
-
+      }
+    } else if (stub_id == Runtime1::load_appendix_patching_id) {
+      NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+      assert(n_copy->data() == 0 ||
+             n_copy->data() == (intptr_t)Universe::non_oop_word(),
+             "illegal init value");
+      n_copy->set_data((intx) (appendix()));
+
+      if (TracePatching) {
+        Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }
     } else {
       ShouldNotReachHere();
     }
 
+#if defined(SPARC) || defined(PPC)
+    if (load_klass_or_mirror_patch_id ||
+        stub_id == Runtime1::load_appendix_patching_id) {
+      // Update the location in the nmethod with the proper
+      // metadata. When the code was generated, a NULL was stuffed
+      // in the metadata table and that table needs to be update to
+      // have the right value. On intel the value is kept
+      // directly in the instruction instead of in the metadata
+      // table, so set_data above effectively updated the value.
+      nmethod* nm = CodeCache::find_nmethod(instr_pc);
+      assert(nm != NULL, "invalid nmethod_pc");
+      RelocIterator mds(nm, copy_buff, copy_buff + 1);
+      bool found = false;
+      while (mds.next() && !found) {
+        if (mds.type() == relocInfo::oop_type) {
+          assert(stub_id == Runtime1::load_mirror_patching_id ||
+                 stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+          oop_Relocation* r = mds.oop_reloc();
+          oop* oop_adr = r->oop_addr();
+          *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+          r->fix_oop_relocation();
+          found = true;
+        } else if (mds.type() == relocInfo::metadata_type) {
+          assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+          metadata_Relocation* r = mds.metadata_reloc();
+          Metadata** metadata_adr = r->metadata_addr();
+          *metadata_adr = load_klass();
+          r->fix_metadata_relocation();
+          found = true;
+        }
+      }
+      assert(found, "the metadata must exist!");
+    }
+#endif
     if (do_patch) {
       // replace instructions
       // first replace the tail, then the call
 #ifdef ARM
       if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
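The #if defined(SPARC) || defined(PPC) block above is the pre-existing table fix-up, hoisted out of the load_klass_or_mirror branch so the new appendix case can share it: on those platforms the patched constant is loaded from the nmethod's oop/metadata table rather than being encoded in the instruction, so set_data() alone is not enough and the matching relocation entry must be rewritten too. A compilable miniature of that walk, with stand-in types for RelocIterator and the relocation records (see relocInfo.hpp for the real ones):

#include <cassert>
#include <cstdint>
#include <vector>

enum RelocType { oop_type, metadata_type };

struct Reloc {
  RelocType type;
  intptr_t* slot;  // address of the oop/metadata table entry it describes
};

// Walks the relocations covering the patched instruction and rewrites the
// first matching table entry, mirroring the "while (mds.next() && !found)"
// loop and its closing assert above.
void fix_constant_table(std::vector<Reloc>& relocs, RelocType wanted, intptr_t value) {
  bool found = false;
  for (Reloc& r : relocs) {
    if (r.type == wanted) {
      *r.slot = value;  // corresponds to *oop_adr = ... / *metadata_adr = ...
      found = true;     // the real code also calls fix_oop_relocation() etc.
      break;
    }
  }
  assert(found && "the metadata must exist!");
  (void)found;  // silence the unused warning in NDEBUG builds
}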
@@ -1075,11 +1110,12 @@
         *(unsigned char*)dst = (unsigned char) a_byte;
       }
       ICache::invalidate_range(instr_pc, *byte_count);
       NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-      if (load_klass_or_mirror_patch_id) {
+      if (load_klass_or_mirror_patch_id ||
+          stub_id == Runtime1::load_appendix_patching_id) {
         relocInfo::relocType rtype =
           (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
                                    relocInfo::oop_type;
         // update relocInfo to metadata
@@ -1116,11 +1152,12 @@
       }
     }
 
     // If we are patching in a non-perm oop, make sure the nmethod
     // is on the right list.
-    if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+    if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                                (appendix.not_null() && appendix->is_scavengable()))) {
       MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
       guarantee(nm != NULL, "only nmethods can contain non-perm oops");
       if (!nm->on_scavenge_root_list()) {
         CodeCache::add_scavenge_root_nmethod(nm);
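The widened condition above matters for garbage collection: a freshly patched-in mirror or appendix oop may live in the young generation, and an nmethod embedding such an oop has to be on the code cache's scavenge-root list or a scavenge would miss it. In miniature (stand-in types, and without the CodeCache_lock the real code takes):

struct nmethod_model { bool on_scavenge_root_list = false; };

void ensure_scavenge_root(nmethod_model* nm, bool patched_oop_is_scavengable) {
  if (patched_oop_is_scavengable && !nm->on_scavenge_root_list) {
    // models CodeCache::add_scavenge_root_nmethod(nm)
    nm->on_scavenge_root_list = true;
  }
}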
@@ -1177,10 +1214,28 @@
   // Return true if calling code is deoptimized
 
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
 // know after the patching is complete and any safepoint(s) are taken
 // if the calling nmethod was deoptimized. We do this by calling a
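The new move_appendix_patching follows the same template as the existing move_klass_patching and move_mirror_patching entry points: do the patching inside a brief VM transition, then report whether the caller nmethod was deoptimized while the thread was stopped, so the platform patching stub knows whether it may simply re-execute the patched instruction. A sketch of that decision as the stub sees it (hypothetical names; the real check lives in generated assembly, not C++):

// Hypothetical model of the stub-side use of the return value above.
enum Resume { REEXECUTE_PATCHED_CODE, UNWIND_TO_DEOPT_HANDLER };

Resume after_patching(int caller_is_deopted /* result of move_appendix_patching */) {
  // Nonzero means a safepoint during patching deoptimized the caller:
  // its compiled frame is invalid and execution must continue in the
  // deopt blob instead of jumping back into the nmethod.
  return caller_is_deopted ? UNWIND_TO_DEOPT_HANDLER : REEXECUTE_PATCHED_CODE;
}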