comparison src/share/vm/c1/c1_Runtime1.cpp @ 1930:2d26b0046e0d

Merge.
author Thomas Wuerthinger <wuerthinger@ssw.jku.at>
date Tue, 30 Nov 2010 14:53:30 +0100
parents 2fe369533fed ce6848d0666d
children 48bbaead8b6c
comparing 1484:6b7001391c97 with 1930:2d26b0046e0d
1 /* 1 /*
2 * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
14 * 14 *
15 * You should have received a copy of the GNU General Public License version 15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation, 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * CA 95054 USA or visit www.sun.com if you need additional information or 20 * or visit www.oracle.com if you need additional information or have any
21 * have any questions. 21 * questions.
22 * 22 *
23 */ 23 */
24 24
25 #include "incls/_precompiled.incl" 25 #include "incls/_precompiled.incl"
26 #include "incls/_c1_Runtime1.cpp.incl" 26 #include "incls/_c1_Runtime1.cpp.incl"
58 assert(_num_rt_args == args, "can't change the number of args"); 58 assert(_num_rt_args == args, "can't change the number of args");
59 } 59 }
60 60
61 // Implementation of Runtime1 61 // Implementation of Runtime1
62 62
63 bool Runtime1::_is_initialized = false;
64 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids]; 63 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
65 const char *Runtime1::_blob_names[] = { 64 const char *Runtime1::_blob_names[] = {
66 RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME) 65 RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
67 }; 66 };
68 67
87 int Runtime1::_throw_incompatible_class_change_error_count = 0; 86 int Runtime1::_throw_incompatible_class_change_error_count = 0;
88 int Runtime1::_throw_array_store_exception_count = 0; 87 int Runtime1::_throw_array_store_exception_count = 0;
89 int Runtime1::_throw_count = 0; 88 int Runtime1::_throw_count = 0;
90 #endif 89 #endif
91 90
92 BufferBlob* Runtime1::_buffer_blob = NULL;
93
94 // Simple helper to see if the caller of a runtime stub which 91 // Simple helper to see if the caller of a runtime stub which
95 // entered the VM has been deoptimized 92 // entered the VM has been deoptimized
96 93
97 static bool caller_is_deopted() { 94 static bool caller_is_deopted() {
98 JavaThread* thread = JavaThread::current(); 95 JavaThread* thread = JavaThread::current();
108 if ( !caller_is_deopted()) { 105 if ( !caller_is_deopted()) {
109 JavaThread* thread = JavaThread::current(); 106 JavaThread* thread = JavaThread::current();
110 RegisterMap reg_map(thread, false); 107 RegisterMap reg_map(thread, false);
111 frame runtime_frame = thread->last_frame(); 108 frame runtime_frame = thread->last_frame();
112 frame caller_frame = runtime_frame.sender(&reg_map); 109 frame caller_frame = runtime_frame.sender(&reg_map);
113 // bypass VM_DeoptimizeFrame and deoptimize the frame directly
114 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 110 Deoptimization::deoptimize_frame(thread, caller_frame.id());
115 assert(caller_is_deopted(), "Must be deoptimized"); 111 assert(caller_is_deopted(), "Must be deoptimized");
116 } 112 }
117 } 113 }
118 114
119 115
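Below is a minimal standalone model, in plain C++ rather than the HotSpot frame and thread classes, of the pattern the two helpers above rely on: the runtime stub's own frame sits on top of the stack, so its sender is the compiled caller, and deopt_caller() now marks that caller for deoptimization directly instead of going through a separate VM operation. The Frame struct and the vector-as-stack are illustrative assumptions only.

    #include <cassert>
    #include <vector>

    // Illustrative stand-ins for the HotSpot frame/thread machinery.
    struct Frame { bool is_compiled_frame; bool is_deoptimized_frame; };

    // The runtime stub frame is the topmost frame; its sender is the compiled caller.
    static Frame& caller_of_runtime_stub(std::vector<Frame>& stack) {
      return stack[stack.size() - 2];
    }

    static bool caller_is_deopted(std::vector<Frame>& stack) {
      Frame& caller = caller_of_runtime_stub(stack);
      return caller.is_compiled_frame && caller.is_deoptimized_frame;
    }

    static void deopt_caller(std::vector<Frame>& stack) {
      if (!caller_is_deopted(stack)) {
        // Deoptimize the caller frame directly (no separate VM operation).
        caller_of_runtime_stub(stack).is_deoptimized_frame = true;
        assert(caller_is_deopted(stack) && "Must be deoptimized");
      }
    }

    int main() {
      std::vector<Frame> stack = { {true, false} /* compiled caller */, {false, false} /* runtime stub */ };
      deopt_caller(stack);
      assert(caller_is_deopted(stack));
      return 0;
    }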
120 BufferBlob* Runtime1::get_buffer_blob() { 116 void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
121 // Allocate code buffer space only once
122 BufferBlob* blob = _buffer_blob;
123 if (blob == NULL) {
124 // setup CodeBuffer. Preallocate a BufferBlob of size
125 // NMethodSizeLimit plus some extra space for constants.
126 int code_buffer_size = desired_max_code_buffer_size() + desired_max_constant_size();
127 blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
128 code_buffer_size);
129 guarantee(blob != NULL, "must create initial code buffer");
130 _buffer_blob = blob;
131 }
132 return _buffer_blob;
133 }
134
135 void Runtime1::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
136 // Preinitialize the consts section to some large size:
137 int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
138 char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
139 code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
140 locs_buffer_size / sizeof(relocInfo));
141 code->initialize_consts_size(desired_max_constant_size());
142 // Call stubs + deopt/exception handler
143 code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
144 LIR_Assembler::exception_handler_size +
145 LIR_Assembler::deopt_handler_size);
146 }
147
148
149 void Runtime1::generate_blob_for(StubID id) {
150 assert(0 <= id && id < number_of_ids, "illegal stub id"); 117 assert(0 <= id && id < number_of_ids, "illegal stub id");
151 ResourceMark rm; 118 ResourceMark rm;
152 // create code buffer for code storage 119 // create code buffer for code storage
153 CodeBuffer code(get_buffer_blob()->instructions_begin(), 120 CodeBuffer code(buffer_blob);
154 get_buffer_blob()->instructions_size()); 121
155 122 Compilation::setup_code_buffer(&code, 0);
156 setup_code_buffer(&code, 0);
157 123
158 // create assembler for code generation 124 // create assembler for code generation
159 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id); 125 StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
160 // generate code for runtime stub 126 // generate code for runtime stub
161 OopMapSet* oop_maps; 127 OopMapSet* oop_maps;
180 case c1x_arithmetic_drem_id: 146 case c1x_arithmetic_drem_id:
181 #ifndef TIERED 147 #ifndef TIERED
182 case counter_overflow_id: // Not generated outside the tiered world 148 case counter_overflow_id: // Not generated outside the tiered world
183 #endif 149 #endif
184 #ifdef SPARC 150 #ifdef SPARC
151 case counter_overflow_id:
152 #if defined(SPARC) || defined(PPC)
185 case handle_exception_nofpu_id: // Unused on sparc 153 case handle_exception_nofpu_id: // Unused on sparc
186 #endif 154 #endif
187 break; 155 break;
188 156
189 // All other stubs should have oopmaps 157 // All other stubs should have oopmaps
207 assert(blob != NULL, "blob must exist"); 175 assert(blob != NULL, "blob must exist");
208 _blobs[id] = blob; 176 _blobs[id] = blob;
209 } 177 }
210 178
211 179
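A simplified standalone sketch of the ownership change in generate_blob_for(): the scratch code buffer is no longer a lazily created static inside Runtime1 (the removed get_buffer_blob()); the caller allocates it once and passes it into each call, which assembles into it and copies the finished bytes out. Plain C++ with made-up ScratchBuffer and byte-vector types, not the HotSpot CodeBuffer/BufferBlob API.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-ins; not the HotSpot CodeBuffer/BufferBlob classes.
    struct ScratchBuffer { std::vector<uint8_t> bytes; };

    // Assemble one stub into the shared scratch buffer, then copy the result
    // out into the stub's own permanent storage.
    static std::vector<uint8_t> generate_blob_for(ScratchBuffer& scratch, uint8_t id) {
      scratch.bytes.assign({id, uint8_t(0xC3)});   // pretend-generated code: "<id>; ret"
      return scratch.bytes;
    }

    int main() {
      ScratchBuffer scratch;                       // allocated once by the caller
      std::vector<std::vector<uint8_t>> blobs;
      for (uint8_t id = 0; id < 3; id++) {
        blobs.push_back(generate_blob_for(scratch, id));
      }
      std::printf("generated %zu blobs from one scratch buffer\n", blobs.size());
      return 0;
    }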
212 void Runtime1::initialize() { 180 void Runtime1::initialize(BufferBlob* blob) {
213 // Warning: If we have more than one compilation running in parallel, we 181 // platform-dependent initialization
214 // need a lock here with the current setup (lazy initialization). 182 initialize_pd();
215 if (!is_initialized()) { 183 // generate stubs
216 _is_initialized = true; 184 for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
217 185 // printing
218 // platform-dependent initialization
219 initialize_pd();
220 // generate stubs
221 for (int id = 0; id < number_of_ids; id++) generate_blob_for((StubID)id);
222 // printing
223 #ifndef PRODUCT 186 #ifndef PRODUCT
224 if (PrintSimpleStubs) { 187 if (PrintSimpleStubs) {
225 ResourceMark rm; 188 ResourceMark rm;
226 for (int id = 0; id < number_of_ids; id++) { 189 for (int id = 0; id < number_of_ids; id++) {
227 _blobs[id]->print(); 190 _blobs[id]->print();
228 if (_blobs[id]->oop_maps() != NULL) { 191 if (_blobs[id]->oop_maps() != NULL) {
229 _blobs[id]->oop_maps()->print(); 192 _blobs[id]->oop_maps()->print();
230 }
231 } 193 }
232 } 194 }
233 #endif 195 }
234 } 196 #endif
235 } 197 }
236 198
237 199
238 CodeBlob* Runtime1::blob_for(StubID id) { 200 CodeBlob* Runtime1::blob_for(StubID id) {
239 assert(0 <= id && id < number_of_ids, "illegal stub id"); 201 assert(0 <= id && id < number_of_ids, "illegal stub id");
240 if (!is_initialized()) initialize();
241 return _blobs[id]; 202 return _blobs[id];
242 } 203 }
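A simplified standalone model of the initialization change above, in plain C++ rather than the HotSpot types: the stub table is now filled once, eagerly, before any compiler thread runs, so blob_for() becomes a plain array read with no lazy is_initialized() check and no locking concern. The StubID values and Stub struct here are illustrative only.

    #include <array>
    #include <cassert>
    #include <cstdio>

    // Illustrative stub ids and blob type; not the HotSpot declarations.
    enum StubID { new_instance_id, throw_div0_exception_id, number_of_ids };
    struct Stub { const char* name; };

    static std::array<Stub*, number_of_ids> g_blobs{};

    // Runs once at startup, before any compiler thread can ask for a stub.
    static void initialize() {
      static Stub stubs[number_of_ids] = { {"new_instance"}, {"throw_div0_exception"} };
      for (int id = 0; id < number_of_ids; id++) g_blobs[id] = &stubs[id];
    }

    // After eager initialization this is a plain array read; no lazy check, no lock.
    static Stub* blob_for(StubID id) {
      assert(g_blobs[id] != nullptr && "initialize() must have run");
      return g_blobs[id];
    }

    int main() {
      initialize();
      std::printf("%s\n", blob_for(throw_div0_exception_id)->name);
      return 0;
    }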
243 204
244 205
245 const char* Runtime1::name_for(StubID id) { 206 const char* Runtime1::name_for(StubID id) {
282 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit); 243 FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
283 FUNCTION_CASE(entry, trace_block_entry); 244 FUNCTION_CASE(entry, trace_block_entry);
284 245
285 #undef FUNCTION_CASE 246 #undef FUNCTION_CASE
286 247
287 return "<unknown function>"; 248 // Soft float adds more runtime names.
249 return pd_name_for_address(entry);
288 } 250 }
289 251
290 252
291 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass)) 253 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass))
292 NOT_PRODUCT(_new_instance_slowcase_cnt++;) 254 NOT_PRODUCT(_new_instance_slowcase_cnt++;)
364 address bcp = vfst.method()->bcp_from(vfst.bci()); 326 address bcp = vfst.method()->bcp_from(vfst.bci());
365 JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop()); 327 JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
366 } 328 }
367 JRT_END 329 JRT_END
368 330
369 #ifdef TIERED 331 // This is a helper to allow us to safepoint but allow the outer entry
370 JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci)) 332 // to be safepoint free if we need to do an osr
371 RegisterMap map(thread, false); 333 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
372 frame fr = thread->last_frame().sender(&map); 334 nmethod* osr_nm = NULL;
335 methodHandle method(THREAD, m);
336
337 RegisterMap map(THREAD, false);
338 frame fr = THREAD->last_frame().sender(&map);
373 nmethod* nm = (nmethod*) fr.cb(); 339 nmethod* nm = (nmethod*) fr.cb();
374 assert(nm!= NULL && nm->is_nmethod(), "what?"); 340 assert(nm!= NULL && nm->is_nmethod(), "Sanity check");
375 methodHandle method(thread, nm->method()); 341 methodHandle enclosing_method(THREAD, nm->method());
376 if (bci == 0) { 342
377 // invocation counter overflow 343 CompLevel level = (CompLevel)nm->comp_level();
378 if (!Tier1CountOnly) { 344 int bci = InvocationEntryBci;
379 CompilationPolicy::policy()->method_invocation_event(method, CHECK); 345 if (branch_bci != InvocationEntryBci) {
380 } else { 346 // Compute destination bci
381 method()->invocation_counter()->reset(); 347 address pc = method()->code_base() + branch_bci;
382 } 348 Bytecodes::Code branch = Bytecodes::code_at(pc, method());
383 } else { 349 int offset = 0;
384 if (!Tier1CountOnly) { 350 switch (branch) {
385 // We have a bci but not the destination bci and besides a backedge 351 case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
386 // event is more for OSR which we don't want here. 352 case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
387 CompilationPolicy::policy()->method_invocation_event(method, CHECK); 353 case Bytecodes::_if_icmple: case Bytecodes::_ifle:
388 } else { 354 case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
389 method()->backedge_counter()->reset(); 355 case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
390 } 356 case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
391 } 357 case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
392 JRT_END 358 offset = (int16_t)Bytes::get_Java_u2(pc + 1);
393 #endif // TIERED 359 break;
360 case Bytecodes::_goto_w:
361 offset = Bytes::get_Java_u4(pc + 1);
362 break;
363 default: ;
364 }
365 bci = branch_bci + offset;
366 }
367
368 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
369 return osr_nm;
370 }
371
372 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
373 nmethod* osr_nm;
374 JRT_BLOCK
375 osr_nm = counter_overflow_helper(thread, bci, method);
376 if (osr_nm != NULL) {
377 RegisterMap map(thread, false);
378 frame fr = thread->last_frame().sender(&map);
379 Deoptimization::deoptimize_frame(thread, fr.id());
380 }
381 JRT_BLOCK_END
382 return NULL;
383 JRT_END
394 384
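A minimal standalone sketch of the branch-target arithmetic the new counter_overflow_helper performs: Java bytecode stores branch offsets big-endian and relative to the branch's own bci, as a signed 16-bit value for the if*/goto family and a signed 32-bit value for goto_w. Plain C++ rather than the HotSpot Bytes/Bytecodes helpers; the goto opcode value (0xA7) is from the JVM specification.

    #include <cstdint>
    #include <cstdio>

    // Decode the branch target the same way the helper does: a signed 16-bit
    // big-endian offset for if*/goto (matching (int16_t)Bytes::get_Java_u2(pc + 1)),
    // a signed 32-bit offset for goto_w, both relative to the branch's own bci.
    static int target_bci(const uint8_t* code, int branch_bci, bool is_goto_w) {
      const uint8_t* pc = code + branch_bci;
      int32_t offset;
      if (is_goto_w) {
        offset = (int32_t)(((uint32_t)pc[1] << 24) | ((uint32_t)pc[2] << 16) |
                           ((uint32_t)pc[3] << 8)  |  (uint32_t)pc[4]);
      } else {
        offset = (int16_t)(((uint16_t)pc[1] << 8) | pc[2]);
      }
      return branch_bci + offset;
    }

    int main() {
      uint8_t code[16] = {0};
      code[10] = 0xA7;                   // goto
      code[11] = 0xFF; code[12] = 0xF9;  // offset -7, i.e. a loop back-edge
      std::printf("back-edge target bci = %d\n", target_bci(code, 10, false));  // prints 3
      return 0;
    }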
395 extern void vm_exit(int code); 385 extern void vm_exit(int code);
396 386
397 // Enter this method from compiled code handler below. This is where we transition 387 // Enter this method from compiled code handler below. This is where we transition
398 // to VM mode. This is done as a helper routine so that the method called directly 388 // to VM mode. This is done as a helper routine so that the method called directly
459 frame caller_frame = stub_frame.sender(&reg_map); 449 frame caller_frame = stub_frame.sender(&reg_map);
460 450
461 // We don't really want to deoptimize the nmethod itself since we 451 // We don't really want to deoptimize the nmethod itself since we
462 // can actually continue in the exception handler ourselves but I 452 // can actually continue in the exception handler ourselves but I
463 // don't see an easy way to have the desired effect. 453 // don't see an easy way to have the desired effect.
464 VM_DeoptimizeFrame deopt(thread, caller_frame.id()); 454 Deoptimization::deoptimize_frame(thread, caller_frame.id());
465 VMThread::execute(&deopt); 455 assert(caller_is_deopted(), "Must be deoptimized");
466 456
467 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 457 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
468 } 458 }
469 459
470 // ExceptionCache is used only for exceptions at call and not for implicit exceptions 460 // ExceptionCache is used only for exceptions at call and not for implicit exceptions
658 } 648 }
659 JRT_END 649 JRT_END
660 650
661 651
662 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { 652 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
663 Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci)); 653 Bytecode_field* field_access = Bytecode_field_at(caller, bci);
664 // This can be static or non-static field access 654 // This can be static or non-static field access
665 Bytecodes::Code code = field_access->code(); 655 Bytecodes::Code code = field_access->code();
666 656
667 // We must load class, initialize class and resolve the field 657 // We must load class, initialize class and resolve the field
668 FieldAccessInfo result; // initialize class if needed 658 FieldAccessInfo result; // initialize class if needed
778 int patch_field_offset = -1; 768 int patch_field_offset = -1;
779 KlassHandle init_klass(THREAD, klassOop(NULL)); // klass needed by access_field_patching code 769 KlassHandle init_klass(THREAD, klassOop(NULL)); // klass needed by access_field_patching code
780 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code 770 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code
781 if (stub_id == Runtime1::access_field_patching_id) { 771 if (stub_id == Runtime1::access_field_patching_id) {
782 772
783 Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci)); 773 Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
784 FieldAccessInfo result; // initialize class if needed 774 FieldAccessInfo result; // initialize class if needed
785 Bytecodes::Code code = field_access->code(); 775 Bytecodes::Code code = field_access->code();
786 constantPoolHandle constants(THREAD, caller_method->constants()); 776 constantPoolHandle constants(THREAD, caller_method->constants());
787 LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK); 777 LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK);
788 patch_field_offset = result.field_offset(); 778 patch_field_offset = result.field_offset();
838 } 828 }
839 break; 829 break;
840 case Bytecodes::_ldc: 830 case Bytecodes::_ldc:
841 case Bytecodes::_ldc_w: 831 case Bytecodes::_ldc_w:
842 { 832 {
843 Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(), 833 Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
844 caller_method->bcp_from(bci)); 834 k = cc->resolve_constant(CHECK);
845 klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK); 835 assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
846 // ldc wants the java mirror.
847 k = resolved->klass_part()->java_mirror();
848 } 836 }
849 break; 837 break;
850 default: 838 default:
851 tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc()); 839 tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc());
852 Unimplemented(); 840 Unimplemented();
869 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 857 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
870 if (nm != NULL) { 858 if (nm != NULL) {
871 nm->make_not_entrant(); 859 nm->make_not_entrant();
872 } 860 }
873 861
874 VM_DeoptimizeFrame deopt(thread, caller_frame.id()); 862 Deoptimization::deoptimize_frame(thread, caller_frame.id());
875 VMThread::execute(&deopt);
876 863
877 // Return to the now deoptimized frame. 864 // Return to the now deoptimized frame.
878 } 865 }
879 866
867 // If we are patching in a non-perm oop, make sure the nmethod
868 // is on the right list.
869 if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) {
870 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
871 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
872 guarantee(nm != NULL, "only nmethods can contain non-perm oops");
873 if (!nm->on_scavenge_root_list())
874 CodeCache::add_scavenge_root_nmethod(nm);
875 }
880 876
881 // Now copy code back 877 // Now copy code back
882 878
883 { 879 {
884 MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag); 880 MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
948 if (jump->jump_destination() == being_initialized_entry) { 944 if (jump->jump_destination() == being_initialized_entry) {
949 assert(do_patch == true, "initialization must be complete at this point"); 945 assert(do_patch == true, "initialization must be complete at this point");
950 } else { 946 } else {
951 // patch the instruction <move reg, klass> 947 // patch the instruction <move reg, klass>
952 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); 948 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
953 assert(n_copy->data() == 0, "illegal init value"); 949
950 assert(n_copy->data() == 0 ||
951 n_copy->data() == (intptr_t)Universe::non_oop_word(),
952 "illegal init value");
954 assert(load_klass() != NULL, "klass not set"); 953 assert(load_klass() != NULL, "klass not set");
955 n_copy->set_data((intx) (load_klass())); 954 n_copy->set_data((intx) (load_klass()));
956 955
957 if (TracePatching) { 956 if (TracePatching) {
958 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); 957 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
959 } 958 }
960 959
961 #ifdef SPARC 960 #if defined(SPARC) || defined(PPC)
962 // Update the oop location in the nmethod with the proper 961 // Update the oop location in the nmethod with the proper
963 // oop. When the code was generated, a NULL was stuffed 962 // oop. When the code was generated, a NULL was stuffed
964 // in the oop table and that table needs to be update to 963 // in the oop table and that table needs to be update to
965 // have the right value. On intel the value is kept 964 // have the right value. On intel the value is kept
966 // directly in the instruction instead of in the oop 965 // directly in the instruction instead of in the oop
986 ShouldNotReachHere(); 985 ShouldNotReachHere();
987 } 986 }
988 if (do_patch) { 987 if (do_patch) {
989 // replace instructions 988 // replace instructions
990 // first replace the tail, then the call 989 // first replace the tail, then the call
990 #ifdef ARM
991 if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) {
992 copy_buff -= *byte_count;
993 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
994 n_copy2->set_data((intx) (load_klass()), instr_pc);
995 }
996 #endif
997
991 for (int i = NativeCall::instruction_size; i < *byte_count; i++) { 998 for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
992 address ptr = copy_buff + i; 999 address ptr = copy_buff + i;
993 int a_byte = (*ptr) & 0xFF; 1000 int a_byte = (*ptr) & 0xFF;
994 address dst = instr_pc + i; 1001 address dst = instr_pc + i;
995 *(unsigned char*)dst = (unsigned char) a_byte; 1002 *(unsigned char*)dst = (unsigned char) a_byte;
1012 // Sparc takes two relocations for an oop so update the second one. 1019 // Sparc takes two relocations for an oop so update the second one.
1013 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset; 1020 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
1014 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); 1021 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1015 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, 1022 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1016 relocInfo::none, relocInfo::oop_type); 1023 relocInfo::none, relocInfo::oop_type);
1024 #endif
1025 #ifdef PPC
1026 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
1027 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1028 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type);
1029 }
1017 #endif 1030 #endif
1018 } 1031 }
1019 1032
1020 } else { 1033 } else {
1021 ICache::invalidate_range(copy_buff, *byte_count); 1034 ICache::invalidate_range(copy_buff, *byte_count);
1174 #endif 1187 #endif
1175 1188
1176 if (length == 0) return; 1189 if (length == 0) return;
1177 // Not guaranteed to be word atomic, but that doesn't matter 1190 // Not guaranteed to be word atomic, but that doesn't matter
1178 // for anything but an oop array, which is covered by oop_arraycopy. 1191 // for anything but an oop array, which is covered by oop_arraycopy.
1179 Copy::conjoint_bytes(src, dst, length); 1192 Copy::conjoint_jbytes(src, dst, length);
1180 JRT_END 1193 JRT_END
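A small standalone illustration, in plain C++ rather than the HotSpot Copy class, of the conjoint semantics relied on here: System.arraycopy may copy within a single array, so the source and destination ranges can overlap and the copy must behave like memmove rather than memcpy; word atomicity is a separate concern that, as the comment notes, only matters for oop arrays.

    #include <cstdio>
    #include <cstring>

    int main() {
      char a[] = "abcdefgh";
      // Overlapping ranges within one array: shift "abcdef" right by two slots.
      std::memmove(a + 2, a, 6);     // conjoint (overlap-safe) copy
      std::printf("%s\n", a);        // prints "ababcdef"
      return 0;
    }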
1181 1194
1182 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num)) 1195 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
1183 #ifndef PRODUCT 1196 #ifndef PRODUCT
1184 _oop_arraycopy_cnt++; 1197 _oop_arraycopy_cnt++;