comparison src/share/vm/c1/c1_Runtime1.cpp @ 1681:126ea7725993

6953477: Increase portability and flexibility of building Hotspot
Summary: A collection of portability improvements including shared code support
for PPC, ARM platforms, software floating point, cross compilation support and
improvements in error crash detail.
Reviewed-by: phh, never, coleenp, dholmes
author bobv
date Tue, 03 Aug 2010 08:13:38 -0400
parents d93949c5bdcc
children 3e8fbc61cee8
comparing 1680:a64438a2b7e8 (parent) with 1681:126ea7725993 (this changeset)
@@ -142,11 +142,11 @@
       case fpu2long_stub_id:
       case unwind_exception_id:
 #ifndef TIERED
       case counter_overflow_id: // Not generated outside the tiered world
 #endif
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
       case handle_exception_nofpu_id: // Unused on sparc
 #endif
         break;

         // All other stubs should have oopmaps

@@ -238,11 +238,12 @@
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
   FUNCTION_CASE(entry, trace_block_entry);

 #undef FUNCTION_CASE

-  return "<unknown function>";
+  // Soft float adds more runtime names.
+  return pd_name_for_address(entry);
 }


 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass))
   NOT_PRODUCT(_new_instance_slowcase_cnt++;)
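
Ports built with software floating point route FP arithmetic through extra runtime helpers, and the new pd_name_for_address() hook lets each CPU port attach names to those entries instead of falling straight through to "<unknown function>". A minimal sketch of what a platform file might supply follows; the helper names (SharedRuntime::dadd, SharedRuntime::dmul) are assumptions for illustration, not taken from this changeset.

    // c1_Runtime1_<cpu>.cpp -- illustrative sketch only
    const char* Runtime1::pd_name_for_address(address entry) {
    #define FUNCTION_CASE(a, f) \
      if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

      // Assumed soft-float helpers; a real port lists whatever entries it emits.
      FUNCTION_CASE(entry, SharedRuntime::dadd);
      FUNCTION_CASE(entry, SharedRuntime::dmul);

    #undef FUNCTION_CASE

      // Keep the old fallback for anything the port does not recognize.
      return "<unknown function>";
    }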

@@ -894,13 +895,16 @@
     if (jump->jump_destination() == being_initialized_entry) {
       assert(do_patch == true, "initialization must be complete at this point");
     } else {
       // patch the instruction <move reg, klass>
       NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
-      assert(n_copy->data() == 0, "illegal init value");
+
+      assert(n_copy->data() == 0 ||
+             n_copy->data() == (int)Universe::non_oop_word(),
+             "illegal init value");
       assert(load_klass() != NULL, "klass not set");
       n_copy->set_data((intx) (load_klass()));

       if (TracePatching) {
         Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }

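
The assert is widened because some ports seed the patchable constant with Universe::non_oop_word() (a sentinel guaranteed never to be a valid oop) rather than with 0. A small sketch of the condition the new assert encodes, for illustration only:

    // Sketch only: a patch site still counts as uninitialized if it holds
    // either seed value a platform may emit before patching.
    static bool is_unpatched_seed(intptr_t v) {
      return v == 0 || v == (intptr_t)Universe::non_oop_word();
    }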

@@ -907,6 +911,6 @@
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
       // Update the oop location in the nmethod with the proper
       // oop. When the code was generated, a NULL was stuffed
       // in the oop table and that table needs to be update to
       // have the right value. On intel the value is kept
       // directly in the instruction instead of in the oop

@@ -932,10 +936,18 @@
       ShouldNotReachHere();
     }
     if (do_patch) {
       // replace instructions
       // first replace the tail, then the call
+#ifdef ARM
+      if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) {
+        copy_buff -= *byte_count;
+        NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
+        n_copy2->set_data((intx) (load_klass()), instr_pc);
+      }
+#endif
+
       for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
         address ptr = copy_buff + i;
         int a_byte = (*ptr) & 0xFF;
         address dst = instr_pc + i;
         *(unsigned char*)dst = (unsigned char) a_byte;
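
The ARM special case exists because only cores with movw/movt (ARMv7, and v6T2) can build a 32-bit constant directly in the instruction stream; older encodings presumably load it from a pc-relative literal pool, so the copied patch has to be rebound to the real patch address, which is what the extra set_data(..., instr_pc) call does. A standalone sketch of the movw/movt split, using an assumed stand-in constant:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Stand-in value for the klass pointer being patched in.
      uint32_t klass_addr = 0xCAFEBABE;
      uint16_t lo = (uint16_t)(klass_addr & 0xFFFF);  // movw immediate (low 16 bits)
      uint16_t hi = (uint16_t)(klass_addr >> 16);     // movt immediate (high 16 bits)
      // With movw/movt the value lives inside the two instructions, so a plain
      // byte copy of the patch is position independent; a literal-pool load is
      // pc-relative, which is why the copy must be rebound to instr_pc above.
      printf("movw rX, #0x%04x\nmovt rX, #0x%04x\n", lo, hi);
      return 0;
    }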

@@ -958,10 +970,16 @@
           // Sparc takes two relocations for an oop so update the second one.
           address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
           RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
           relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                    relocInfo::none, relocInfo::oop_type);
+#endif
+#ifdef PPC
+        { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
+          RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
+          relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type);
+        }
 #endif
         }

       } else {
         ICache::invalidate_range(copy_buff, *byte_count);
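
Both SPARC and PPC split the constant across two instructions, so each immediate field is a separate patch site and needs its own oop-type relocation; NativeMovConstReg::lo_offset locates the second half on PPC the way add_offset locates the add on SPARC. As an illustrative debugging aid only (not part of the changeset, and assuming NativeMovConstReg::instruction_size covers the whole sequence on the port in question), a walk like this placed after the retype calls could confirm that both halves now show up as oop relocations:

    // Uses nm and instr_pc from the surrounding patching code.
    RelocIterator check(nm, instr_pc, instr_pc + NativeMovConstReg::instruction_size);
    while (check.next()) {
      if (check.type() == relocInfo::oop_type) {
        tty->print_cr("oop reloc at " INTPTR_FORMAT, (intptr_t)check.addr());
      }
    }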