comparison src/share/vm/c1/c1_Runtime1.cpp @ 6948:e522a00b91aa
Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works
| author | Doug Simon <doug.simon@oracle.com> |
|---|---|
| date | Mon, 12 Nov 2012 23:14:12 +0100 |
| parents | fd71ca8c5f88 d8ce2825b193 |
| children | 2cb439954abf |
| 6711:ae13cc658b80 | 6948:e522a00b91aa |
|---|---|
313 // Soft float adds more runtime names. | 313 // Soft float adds more runtime names. |
314 return pd_name_for_address(entry); | 314 return pd_name_for_address(entry); |
315 } | 315 } |
316 | 316 |
317 | 317 |
318 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass)) | 318 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass)) |
319 NOT_PRODUCT(_new_instance_slowcase_cnt++;) | 319 NOT_PRODUCT(_new_instance_slowcase_cnt++;) |
320 | 320 |
321 assert(oop(klass)->is_klass(), "not a class"); | 321 assert(klass->is_klass(), "not a class"); |
322 instanceKlassHandle h(thread, klass); | 322 instanceKlassHandle h(thread, klass); |
323 h->check_valid_for_instantiation(true, CHECK); | 323 h->check_valid_for_instantiation(true, CHECK); |
324 // make sure klass is initialized | 324 // make sure klass is initialized |
325 h->initialize(CHECK); | 325 h->initialize(CHECK); |
326 // allocate instance and return via TLS | 326 // allocate instance and return via TLS |
327 oop obj = h->allocate_instance(CHECK); | 327 oop obj = h->allocate_instance(CHECK); |
328 thread->set_vm_result(obj); | 328 thread->set_vm_result(obj); |
329 JRT_END | 329 JRT_END |
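The CHECK macros threading through this entry point are HotSpot's TRAPS exception-propagation convention. A minimal hedged sketch of the pattern, assuming the standard CHECK/CHECK_NULL expansion from utilities/exceptions.hpp; this is an illustration, not code from this file:

```cpp
// Hedged sketch of the TRAPS/CHECK idiom used above. CHECK_NULL expands
// to roughly: THREAD); if (HAS_PENDING_EXCEPTION) return NULL; (void)(0
// so every allocating call can bail out without explicit error plumbing.
oop allocate_instance_checked(instanceKlassHandle h, TRAPS) {
  h->check_valid_for_instantiation(true, CHECK_NULL); // reject abstract/interface
  h->initialize(CHECK_NULL);                          // run <clinit> if needed
  return h->allocate_instance(CHECK_NULL);            // may GC, may throw OOME
}
```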
330 | 330 |
331 | 331 |
332 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, klassOopDesc* klass, jint length)) | 332 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length)) |
333 NOT_PRODUCT(_new_type_array_slowcase_cnt++;) | 333 NOT_PRODUCT(_new_type_array_slowcase_cnt++;) |
334 // Note: no handle for klass is needed since it is not used | 334 // Note: no handle for klass is needed since it is not used |
335 // anymore after new_typeArray() and no GC can happen before. | 335 // anymore after new_typeArray() and no GC can happen before. |
336 // (This may have to change if this code changes!) | 336 // (This may have to change if this code changes!) |
337 assert(oop(klass)->is_klass(), "not a class"); | 337 assert(klass->is_klass(), "not a class"); |
338 BasicType elt_type = typeArrayKlass::cast(klass)->element_type(); | 338 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type(); |
339 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK); | 339 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK); |
340 thread->set_vm_result(obj); | 340 thread->set_vm_result(obj); |
341 // This is pretty rare but this runtime patch is stressful to deoptimization | 341 // This is pretty rare but this runtime patch is stressful to deoptimization |
342 // if we deoptimize here, so force a deopt to stress the path. | 342 // if we deoptimize here, so force a deopt to stress the path. |
343 if (DeoptimizeALot) { | 343 if (DeoptimizeALot) { |
344 deopt_caller(); | 344 deopt_caller(); |
345 } | 345 } |
346 | 346 |
347 JRT_END | 347 JRT_END |
348 | 348 |
349 | 349 |
350 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, klassOopDesc* array_klass, jint length)) | 350 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length)) |
351 NOT_PRODUCT(_new_object_array_slowcase_cnt++;) | 351 NOT_PRODUCT(_new_object_array_slowcase_cnt++;) |
352 | 352 |
353 // Note: no handle for klass is needed since it is not used | 353 // Note: no handle for klass is needed since it is not used |
354 // anymore after new_objArray() and no GC can happen before. | 354 // anymore after new_objArray() and no GC can happen before. |
355 // (This may have to change if this code changes!) | 355 // (This may have to change if this code changes!) |
356 assert(oop(array_klass)->is_klass(), "not a class"); | 356 assert(array_klass->is_klass(), "not a class"); |
357 klassOop elem_klass = objArrayKlass::cast(array_klass)->element_klass(); | 357 Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass(); |
358 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); | 358 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); |
359 thread->set_vm_result(obj); | 359 thread->set_vm_result(obj); |
360 // This is pretty rare but this runtime patch is stressful to deoptimization | 360 // This is pretty rare but this runtime patch is stressful to deoptimization |
361 // if we deoptimize here, so force a deopt to stress the path. | 361 // if we deoptimize here, so force a deopt to stress the path. |
362 if (DeoptimizeALot) { | 362 if (DeoptimizeALot) { |
363 deopt_caller(); | 363 deopt_caller(); |
364 } | 364 } |
365 JRT_END | 365 JRT_END |
366 | 366 |
367 | 367 |
368 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims)) | 368 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims)) |
369 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;) | 369 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;) |
370 | 370 |
371 assert(oop(klass)->is_klass(), "not a class"); | 371 assert(klass->is_klass(), "not a class"); |
372 assert(rank >= 1, "rank must be nonzero"); | 372 assert(rank >= 1, "rank must be nonzero"); |
373 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); | 373 oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); |
374 thread->set_vm_result(obj); | 374 thread->set_vm_result(obj); |
375 JRT_END | 375 JRT_END |
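For the rank/dims contract above, a hedged illustration; the outermost-first layout of dims is an assumption based on the multianewarray bytecode's operand order:

```cpp
// Hedged illustration of a call shape for Runtime1::new_multi_array:
// for the Java expression `new int[3][4]`, C1 would pass rank 2 and a
// dims vector laid out outermost dimension first (assumed order).
jint dims[] = { 3, 4 };  // dims[0] = outer length, dims[1] = inner length
int  rank   = 2;
// The entry point then forwards to the array klass, as in the hunk:
//   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
```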
376 | 376 |
377 | 377 |
378 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id)) | 378 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id)) |
389 | 389 |
390 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method | 390 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method |
391 // associated with the top activation record. The inlinee method (possibly inlined into the | 391 // associated with the top activation record. The inlinee method (possibly inlined into the |
392 // enclosing method) is passed as an argument. To make that possible, it is embedded in the | 392 // enclosing method) is passed as an argument. To make that possible, it is embedded in the |
393 // code as a constant. | 393 // code as a constant. |
394 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { | 394 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) { |
395 nmethod* osr_nm = NULL; | 395 nmethod* osr_nm = NULL; |
396 methodHandle method(THREAD, m); | 396 methodHandle method(THREAD, m); |
397 | 397 |
398 RegisterMap map(THREAD, false); | 398 RegisterMap map(THREAD, false); |
399 frame fr = THREAD->last_frame().sender(&map); | 399 frame fr = THREAD->last_frame().sender(&map); |
429 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD); | 429 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD); |
430 assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions"); | 430 assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions"); |
431 return osr_nm; | 431 return osr_nm; |
432 } | 432 } |
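The comment before counter_overflow_helper explains that the (possibly inlined) method is embedded in the compiled code as a constant. A hedged recap of why both methods are needed; the nmethod cast is an assumption, and the elided middle of the helper is not shown in this hunk:

```cpp
// Hedged recap of the helper's two inputs: the stack walk can only
// recover the enclosing compiled method, so the possibly-inlined method
// that overflowed its counter must arrive as an argument that C1
// embedded as a constant at compile time.
RegisterMap map(THREAD, false);
frame fr = THREAD->last_frame().sender(&map);  // caller's compiled frame
nmethod* nm = (nmethod*) fr.cb();              // enclosing method's code (assumed cast)
// `method` (the handle around the embedded Method*) names the inlinee.
```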
433 | 433 |
434 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method)) | 434 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method)) |
435 nmethod* osr_nm; | 435 nmethod* osr_nm; |
436 JRT_BLOCK | 436 JRT_BLOCK |
437 osr_nm = counter_overflow_helper(thread, bci, method); | 437 osr_nm = counter_overflow_helper(thread, bci, method); |
438 if (osr_nm != NULL) { | 438 if (osr_nm != NULL) { |
439 RegisterMap map(thread, false); | 439 RegisterMap map(thread, false); |
667 JRT_ENTRY_NO_ASYNC(void, Runtime1::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock)) | 667 JRT_ENTRY_NO_ASYNC(void, Runtime1::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock)) |
668 NOT_PRODUCT(_monitorenter_slowcase_cnt++;) | 668 NOT_PRODUCT(_monitorenter_slowcase_cnt++;) |
669 #ifdef ASSERT | 669 #ifdef ASSERT |
670 if (TraceGraal >= 3) { | 670 if (TraceGraal >= 3) { |
671 char type[1024]; | 671 char type[1024]; |
672 obj->klass()->klass_part()->name()->as_C_string(type, 1024); | 672 obj->klass()->name()->as_C_string(type, 1024); |
673 markOop mark = obj->mark(); | 673 markOop mark = obj->mark(); |
674 tty->print_cr("entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, mark, lock); | 674 tty->print_cr("entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, mark, lock); |
675 tty->flush(); | 675 tty->flush(); |
676 } | 676 } |
677 if (PrintBiasedLockingStatistics) { | 677 if (PrintBiasedLockingStatistics) { |
727 ObjectSynchronizer::fast_exit(obj, lock, THREAD); | 727 ObjectSynchronizer::fast_exit(obj, lock, THREAD); |
728 } | 728 } |
729 #ifdef ASSERT | 729 #ifdef ASSERT |
730 if (TraceGraal >= 3) { | 730 if (TraceGraal >= 3) { |
731 char type[1024]; | 731 char type[1024]; |
732 obj->klass()->klass_part()->name()->as_C_string(type, 1024); | 732 obj->klass()->name()->as_C_string(type, 1024); |
733 tty->print_cr("exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, obj->mark(), lock); | 733 tty->print_cr("exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, obj->mark(), lock); |
734 tty->flush(); | 734 tty->flush(); |
735 } | 735 } |
736 #endif | 736 #endif |
737 JRT_END | 737 JRT_END |
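The klass_part() deletions in this hunk are the NPG change in miniature: obj->klass() used to return a klassOop heap object whose embedded Klass was reached via klass_part(); it now returns Klass* directly. A hedged before/after sketch:

```cpp
char type[1024];
// Pre-NPG (left column): two hops, oop -> klassOop -> Klass
//   obj->klass()->klass_part()->name()->as_C_string(type, 1024);
// Post-NPG (right column): klass() already yields Klass*
obj->klass()->name()->as_C_string(type, 1024);
```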
741 bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS); | 741 bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS); |
742 bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE); | 742 bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE); |
743 if (!string) { | 743 if (!string) { |
744 if (!address && obj->is_oop_or_null(true)) { | 744 if (!address && obj->is_oop_or_null(true)) { |
745 char buf[O_BUFLEN]; | 745 char buf[O_BUFLEN]; |
746 tty->print("%s@%p", obj->klass()->klass_part()->name()->as_C_string(buf, O_BUFLEN), obj); | 746 tty->print("%s@%p", obj->klass()->name()->as_C_string(buf, O_BUFLEN), obj); |
747 } else { | 747 } else { |
748 tty->print("%p", obj); | 748 tty->print("%p", obj); |
749 } | 749 } |
750 } else { | 750 } else { |
751 ResourceMark rm; | 751 ResourceMark rm; |
861 | 861 |
862 // Return to the now deoptimized frame. | 862 // Return to the now deoptimized frame. |
863 JRT_END | 863 JRT_END |
864 | 864 |
865 | 865 |
866 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { | 866 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { |
867 Bytecode_field field_access(caller, bci); | 867 Bytecode_field field_access(caller, bci); |
868 // This can be static or non-static field access | 868 // This can be static or non-static field access |
869 Bytecodes::Code code = field_access.code(); | 869 Bytecodes::Code code = field_access.code(); |
870 | 870 |
871 // We must load class, initialize class and resolve the field | 871 // We must load class, initialize class and resolve the field |
974 // this is used by assertions in the access_field_patching_id | 974 // this is used by assertions in the access_field_patching_id |
975 BasicType patch_field_type = T_ILLEGAL; | 975 BasicType patch_field_type = T_ILLEGAL; |
976 #endif // PRODUCT | 976 #endif // PRODUCT |
977 bool deoptimize_for_volatile = false; | 977 bool deoptimize_for_volatile = false; |
978 int patch_field_offset = -1; | 978 int patch_field_offset = -1; |
979 KlassHandle init_klass(THREAD, klassOop(NULL)); // klass needed by access_field_patching code | 979 KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code |
980 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code | 980 KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code |
| 981 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code |
| 982 bool load_klass_or_mirror_patch_id = |
| 983 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id); |
| 984 |
981 if (stub_id == Runtime1::access_field_patching_id) { | 985 if (stub_id == Runtime1::access_field_patching_id) { |
982 | 986 |
983 Bytecode_field field_access(caller_method, bci); | 987 Bytecode_field field_access(caller_method, bci); |
984 FieldAccessInfo result; // initialize class if needed | 988 FieldAccessInfo result; // initialize class if needed |
985 Bytecodes::Code code = field_access.code(); | 989 Bytecodes::Code code = field_access.code(); |
998 deoptimize_for_volatile = result.access_flags().is_volatile(); | 1002 deoptimize_for_volatile = result.access_flags().is_volatile(); |
999 | 1003 |
1000 #ifndef PRODUCT | 1004 #ifndef PRODUCT |
1001 patch_field_type = result.field_type(); | 1005 patch_field_type = result.field_type(); |
1002 #endif | 1006 #endif |
1003 } else if (stub_id == Runtime1::load_klass_patching_id) { | 1007 } else if (load_klass_or_mirror_patch_id) { |
1004 oop k; | 1008 Klass* k = NULL; |
1005 switch (code) { | 1009 switch (code) { |
1006 case Bytecodes::_putstatic: | 1010 case Bytecodes::_putstatic: |
1007 case Bytecodes::_getstatic: | 1011 case Bytecodes::_getstatic: |
1008 { klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK); | 1012 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK); |
1009 // Save a reference to the class that has to be checked for initialization | |
1010 init_klass = KlassHandle(THREAD, klass); | 1013 init_klass = KlassHandle(THREAD, klass); |
1011 k = klass->java_mirror(); | 1014 mirror = Handle(THREAD, klass->java_mirror()); |
1012 } | 1015 } |
1013 break; | 1016 break; |
1014 case Bytecodes::_new: | 1017 case Bytecodes::_new: |
1015 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci)); | 1018 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci)); |
1016 k = caller_method->constants()->klass_at(bnew.index(), CHECK); | 1019 k = caller_method->constants()->klass_at(bnew.index(), CHECK); |
1031 k = caller_method->constants()->klass_at(cc.index(), CHECK); | 1034 k = caller_method->constants()->klass_at(cc.index(), CHECK); |
1032 } | 1035 } |
1033 break; | 1036 break; |
1034 case Bytecodes::_anewarray: | 1037 case Bytecodes::_anewarray: |
1035 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci)); | 1038 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci)); |
1036 klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK); | 1039 Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK); |
1037 k = Klass::cast(ek)->array_klass(CHECK); | 1040 k = Klass::cast(ek)->array_klass(CHECK); |
1038 } | 1041 } |
1039 break; | 1042 break; |
1040 case Bytecodes::_ldc: | 1043 case Bytecodes::_ldc: |
1041 case Bytecodes::_ldc_w: | 1044 case Bytecodes::_ldc_w: |
1042 { | 1045 { |
1043 Bytecode_loadconstant cc(caller_method, bci); | 1046 Bytecode_loadconstant cc(caller_method, bci); |
1044 k = cc.resolve_constant(CHECK); | 1047 oop m = cc.resolve_constant(CHECK); |
1045 assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); | 1048 mirror = Handle(THREAD, m); |
1046 } | 1049 } |
1047 break; | 1050 break; |
1048 default: | 1051 default: |
1049 tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc()); | 1052 tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc()); |
1050 Unimplemented(); | 1053 Unimplemented(); |
1051 } | 1054 } |
1052 // convert to handle | 1055 // convert to handle |
1053 load_klass = Handle(THREAD, k); | 1056 load_klass = KlassHandle(THREAD, k); |
1054 } else { | 1057 } else { |
1055 ShouldNotReachHere(); | 1058 ShouldNotReachHere(); |
1056 } | 1059 } |
1057 | 1060 |
1058 if (deoptimize_for_volatile) { | 1061 if (deoptimize_for_volatile) { |
1074 // Return to the now deoptimized frame. | 1077 // Return to the now deoptimized frame. |
1075 } | 1078 } |
1076 | 1079 |
1077 // If we are patching in a non-perm oop, make sure the nmethod | 1080 // If we are patching in a non-perm oop, make sure the nmethod |
1078 // is on the right list. | 1081 // is on the right list. |
1079 if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) { | 1082 if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) { |
1080 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); | 1083 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); |
1081 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); | 1084 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); |
1082 guarantee(nm != NULL, "only nmethods can contain non-perm oops"); | 1085 guarantee(nm != NULL, "only nmethods can contain non-perm oops"); |
1083 if (!nm->on_scavenge_root_list()) | 1086 if (!nm->on_scavenge_root_list()) |
1084 CodeCache::add_scavenge_root_nmethod(nm); | 1087 CodeCache::add_scavenge_root_nmethod(nm); |
1139 // Set it now. | 1142 // Set it now. |
1140 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); | 1143 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); |
1141 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type"); | 1144 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type"); |
1142 assert(patch_field_offset >= 0, "illegal offset"); | 1145 assert(patch_field_offset >= 0, "illegal offset"); |
1143 n_move->add_offset_in_bytes(patch_field_offset); | 1146 n_move->add_offset_in_bytes(patch_field_offset); |
1144 } else if (stub_id == Runtime1::load_klass_patching_id) { | 1147 } else if (load_klass_or_mirror_patch_id) { |
1145 // If a getstatic or putstatic is referencing a klass which | 1148 // If a getstatic or putstatic is referencing a klass which |
1146 // isn't fully initialized, the patch body isn't copied into | 1149 // isn't fully initialized, the patch body isn't copied into |
1147 // place until initialization is complete. In this case the | 1150 // place until initialization is complete. In this case the |
1148 // patch site is setup so that any threads besides the | 1151 // patch site is setup so that any threads besides the |
1149 // initializing thread are forced to come into the VM and | 1152 // initializing thread are forced to come into the VM and |
1150 // block. | 1153 // block. |
1151 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) || | 1154 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) || |
1152 instanceKlass::cast(init_klass())->is_initialized(); | 1155 InstanceKlass::cast(init_klass())->is_initialized(); |
1153 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc); | 1156 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc); |
1154 if (jump->jump_destination() == being_initialized_entry) { | 1157 if (jump->jump_destination() == being_initialized_entry) { |
1155 assert(do_patch == true, "initialization must be complete at this point"); | 1158 assert(do_patch == true, "initialization must be complete at this point"); |
1156 } else { | 1159 } else { |
1157 // patch the instruction <move reg, klass> | 1160 // patch the instruction <move reg, klass> |
1158 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); | 1161 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); |
1159 | 1162 |
1160 assert(n_copy->data() == 0 || | 1163 assert(n_copy->data() == 0 || |
1161 n_copy->data() == (intptr_t)Universe::non_oop_word(), | 1164 n_copy->data() == (intptr_t)Universe::non_oop_word(), |
1162 "illegal init value"); | 1165 "illegal init value"); |
| 1166 if (stub_id == Runtime1::load_klass_patching_id) { |
1163 assert(load_klass() != NULL, "klass not set"); | 1167 assert(load_klass() != NULL, "klass not set"); |
1164 n_copy->set_data((intx) (load_klass())); | 1168 n_copy->set_data((intx) (load_klass())); |
| 1169 } else { |
| 1170 assert(mirror() != NULL, "mirror not set"); |
| 1171 n_copy->set_data((intx) (mirror())); |
| 1172 } |
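The new branch above writes one of two fundamentally different pointers into the same move-constant instruction. A hedged condensation of what it does:

```cpp
// Hedged condensation of the branch above: the stub id selects whether
// the NativeMovConstReg gets native metadata or a heap oop.
intx data = (stub_id == Runtime1::load_klass_patching_id)
                ? (intx) load_klass()  // Klass*          (metaspace)
                : (intx) mirror();     // java.lang.Class (Java heap)
n_copy->set_data(data);
```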
1165 | 1173 |
1166 if (TracePatching) { | 1174 if (TracePatching) { |
1167 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); | 1175 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); |
1168 } | 1176 } |
1169 | 1177 |
1170 #if defined(SPARC) || defined(PPC) | 1178 #if defined(SPARC) || defined(PPC) |
1171 // Update the oop location in the nmethod with the proper | 1179 // Update the location in the nmethod with the proper |
1172 // oop. When the code was generated, a NULL was stuffed | 1180 // metadata. When the code was generated, a NULL was stuffed |
1173 // in the oop table and that table needs to be updated to | 1181 // in the metadata table and that table needs to be updated to |
1174 // have the right value. On intel the value is kept | 1182 // have the right value. On intel the value is kept |
1175 // directly in the instruction instead of in the oop | 1183 // directly in the instruction instead of in the metadata |
1176 // table, so set_data above effectively updated the value. | 1184 // table, so set_data above effectively updated the value. |
1177 nmethod* nm = CodeCache::find_nmethod(instr_pc); | 1185 nmethod* nm = CodeCache::find_nmethod(instr_pc); |
1178 assert(nm != NULL, "invalid nmethod_pc"); | 1186 assert(nm != NULL, "invalid nmethod_pc"); |
1179 RelocIterator oops(nm, copy_buff, copy_buff + 1); | 1187 RelocIterator mds(nm, copy_buff, copy_buff + 1); |
1180 bool found = false; | 1188 bool found = false; |
1181 while (oops.next() && !found) { | 1189 while (mds.next() && !found) { |
1182 if (oops.type() == relocInfo::oop_type) { | 1190 if (mds.type() == relocInfo::oop_type) { |
1183 oop_Relocation* r = oops.oop_reloc(); | 1191 assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id"); |
| 1192 oop_Relocation* r = mds.oop_reloc(); |
1184 oop* oop_adr = r->oop_addr(); | 1193 oop* oop_adr = r->oop_addr(); |
1185 *oop_adr = load_klass(); | 1194 *oop_adr = mirror(); |
1186 r->fix_oop_relocation(); | 1195 r->fix_oop_relocation(); |
| 1196 found = true; |
| 1197 } else if (mds.type() == relocInfo::metadata_type) { |
| 1198 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id"); |
| 1199 metadata_Relocation* r = mds.metadata_reloc(); |
| 1200 Metadata** metadata_adr = r->metadata_addr(); |
| 1201 *metadata_adr = load_klass(); |
| 1202 r->fix_metadata_relocation(); |
1187 found = true; | 1203 found = true; |
1188 } | 1204 } |
1189 } | 1205 } |
1190 assert(found, "the oop must exist!"); | 1206 assert(found, "the metadata must exist!"); |
1191 #endif | 1207 #endif |
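On SPARC/PPC the constant lives in a per-nmethod table rather than in the instruction itself, so the table entry needs fixing up too. Post-NPG there are two such tables, one for oops and one for metadata; a hedged sketch of the dispatch pattern the hunk introduces:

```cpp
// Hedged sketch of the two relocation kinds handled above: the mirror
// goes through an oop relocation, the Klass* through a metadata one.
RelocIterator mds(nm, copy_buff, copy_buff + 1);
while (mds.next()) {
  if (mds.type() == relocInfo::oop_type) {             // load_mirror case
    mds.oop_reloc()->fix_oop_relocation();
    break;
  } else if (mds.type() == relocInfo::metadata_type) { // load_klass case
    mds.metadata_reloc()->fix_metadata_relocation();
    break;
  }
}
```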
1192 | 1208 |
1193 } | 1209 } |
1194 } else { | 1210 } else { |
1195 ShouldNotReachHere(); | 1211 ShouldNotReachHere(); |
1196 } | 1212 } |
| 1213 |
1197 if (do_patch) { | 1214 if (do_patch) { |
1198 // replace instructions | 1215 // replace instructions |
1199 // first replace the tail, then the call | 1216 // first replace the tail, then the call |
1200 #ifdef ARM | 1217 #ifdef ARM |
1201 if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) { | 1218 if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) { |
1202 nmethod* nm = CodeCache::find_nmethod(instr_pc); | 1219 nmethod* nm = CodeCache::find_nmethod(instr_pc); |
1203 oop* oop_addr = NULL; | 1220 address addr = NULL; |
1204 assert(nm != NULL, "invalid nmethod_pc"); | 1221 assert(nm != NULL, "invalid nmethod_pc"); |
1205 RelocIterator oops(nm, copy_buff, copy_buff + 1); | 1222 RelocIterator mds(nm, copy_buff, copy_buff + 1); |
1206 while (oops.next()) { | 1223 while (mds.next()) { |
1207 if (oops.type() == relocInfo::oop_type) { | 1224 if (mds.type() == relocInfo::oop_type) { |
1208 oop_Relocation* r = oops.oop_reloc(); | 1225 assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id"); |
1209 oop_addr = r->oop_addr(); | 1226 oop_Relocation* r = mds.oop_reloc(); |
| 1227 addr = (address)r->oop_addr(); |
| 1228 break; |
| 1229 } else if (mds.type() == relocInfo::metadata_type) { |
| 1230 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id"); |
| 1231 metadata_Relocation* r = mds.metadata_reloc(); |
| 1232 addr = (address)r->metadata_addr(); |
1210 break; | 1233 break; |
1211 } | 1234 } |
1212 } | 1235 } |
1213 assert(oop_addr != NULL, "oop relocation must exist"); | 1236 assert(addr != NULL, "metadata relocation must exist"); |
1214 copy_buff -= *byte_count; | 1237 copy_buff -= *byte_count; |
1215 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); | 1238 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); |
1216 n_copy2->set_pc_relative_offset((address)oop_addr, instr_pc); | 1239 n_copy2->set_pc_relative_offset(addr, instr_pc); |
1217 } | 1240 } |
1218 #endif | 1241 #endif |
1219 | 1242 |
1220 for (int i = NativeCall::instruction_size; i < *byte_count; i++) { | 1243 for (int i = NativeCall::instruction_size; i < *byte_count; i++) { |
1221 address ptr = copy_buff + i; | 1244 address ptr = copy_buff + i; |
1224 *(unsigned char*)dst = (unsigned char) a_byte; | 1247 *(unsigned char*)dst = (unsigned char) a_byte; |
1225 } | 1248 } |
1226 ICache::invalidate_range(instr_pc, *byte_count); | 1249 ICache::invalidate_range(instr_pc, *byte_count); |
1227 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); | 1250 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); |
1228 | 1251 |
1229 if (stub_id == Runtime1::load_klass_patching_id) { | 1252 if (load_klass_or_mirror_patch_id) { |
1230 // update relocInfo to oop | 1253 relocInfo::relocType rtype = |
| 1254 (stub_id == Runtime1::load_klass_patching_id) ? |
| 1255 relocInfo::metadata_type : |
| 1256 relocInfo::oop_type; |
| 1257 // update relocInfo to metadata |
1231 nmethod* nm = CodeCache::find_nmethod(instr_pc); | 1258 nmethod* nm = CodeCache::find_nmethod(instr_pc); |
1232 assert(nm != NULL, "invalid nmethod_pc"); | 1259 assert(nm != NULL, "invalid nmethod_pc"); |
1233 | 1260 |
1234 // The old patch site is now a move instruction so update | 1261 // The old patch site is now a move instruction so update |
1235 // the reloc info so that it will get updated during | 1262 // the reloc info so that it will get updated during |
1236 // future GCs. | 1263 // future GCs. |
1237 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1)); | 1264 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1)); |
1238 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc, | 1265 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc, |
1239 relocInfo::none, relocInfo::oop_type); | 1266 relocInfo::none, rtype); |
1240 #ifdef SPARC | 1267 #ifdef SPARC |
1241 // Sparc takes two relocations for an oop so update the second one. | 1268 // Sparc takes two relocations for metadata so update the second one. |
1242 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset; | 1269 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset; |
1243 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); | 1270 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); |
1244 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, | 1271 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, |
1245 relocInfo::none, relocInfo::oop_type); | 1272 relocInfo::none, rtype); |
1246 #endif | 1273 #endif |
1247 #ifdef PPC | 1274 #ifdef PPC |
1248 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset; | 1275 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset; |
1249 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); | 1276 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); |
1250 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type); | 1277 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, |
| 1278 relocInfo::none, rtype); |
1251 } | 1279 } |
1252 #endif | 1280 #endif |
1253 } | 1281 } |
1254 | 1282 |
1255 } else { | 1283 } else { |
1279 { | 1307 { |
1280 // Enter VM mode | 1308 // Enter VM mode |
1281 | 1309 |
1282 ResetNoHandleMark rnhm; | 1310 ResetNoHandleMark rnhm; |
1283 patch_code(thread, load_klass_patching_id); | 1311 patch_code(thread, load_klass_patching_id); |
| 1312 } |
| 1313 // Back in JAVA, use no oops DON'T safepoint |
| 1314 |
| 1315 // Return true if calling code is deoptimized |
| 1316 |
| 1317 return caller_is_deopted(); |
| 1318 } |
| 1319 |
| 1320 int Runtime1::move_mirror_patching(JavaThread* thread) { |
| 1321 // |
| 1322 // NOTE: we are still in Java |
| 1323 // |
| 1324 Thread* THREAD = thread; |
| 1325 debug_only(NoHandleMark nhm;) |
| 1326 { |
| 1327 // Enter VM mode |
| 1328 |
| 1329 ResetNoHandleMark rnhm; |
| 1330 patch_code(thread, load_mirror_patching_id); |
1284 } | 1331 } |
1285 // Back in JAVA, use no oops DON'T safepoint | 1332 // Back in JAVA, use no oops DON'T safepoint |
1286 | 1333 |
1287 // Return true if calling code is deoptimized | 1334 // Return true if calling code is deoptimized |
1288 | 1335 |
1348 bs->write_ref_array_pre(dst_addr, length); | 1395 bs->write_ref_array_pre(dst_addr, length); |
1349 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); | 1396 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); |
1350 bs->write_ref_array((HeapWord*)dst_addr, length); | 1397 bs->write_ref_array((HeapWord*)dst_addr, length); |
1351 return ac_ok; | 1398 return ac_ok; |
1352 } else { | 1399 } else { |
1353 klassOop bound = objArrayKlass::cast(dst->klass())->element_klass(); | 1400 Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass(); |
1354 klassOop stype = objArrayKlass::cast(src->klass())->element_klass(); | 1401 Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass(); |
1355 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { | 1402 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { |
1356 // Elements are guaranteed to be subtypes, so no check necessary | 1403 // Elements are guaranteed to be subtypes, so no check necessary |
1357 bs->write_ref_array_pre(dst_addr, length); | 1404 bs->write_ref_array_pre(dst_addr, length); |
1358 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); | 1405 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); |
1359 bs->write_ref_array((HeapWord*)dst_addr, length); | 1406 bs->write_ref_array((HeapWord*)dst_addr, length); |
1375 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed; | 1422 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed; |
1376 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed; | 1423 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed; |
1377 | 1424 |
1378 if (length == 0) return ac_ok; | 1425 if (length == 0) return ac_ok; |
1379 if (src->is_typeArray()) { | 1426 if (src->is_typeArray()) { |
1380 const klassOop klass_oop = src->klass(); | 1427 Klass* const klass_oop = src->klass(); |
1381 if (klass_oop != dst->klass()) return ac_failed; | 1428 if (klass_oop != dst->klass()) return ac_failed; |
1382 typeArrayKlass* klass = typeArrayKlass::cast(klass_oop); | 1429 TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop); |
1383 const int l2es = klass->log2_element_size(); | 1430 const int l2es = klass->log2_element_size(); |
1384 const int ihs = klass->array_header_in_bytes() / wordSize; | 1431 const int ihs = klass->array_header_in_bytes() / wordSize; |
1385 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es); | 1432 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es); |
1386 char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es); | 1433 char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es); |
1387 // Potential problem: memmove is not guaranteed to be word atomic | 1434 // Potential problem: memmove is not guaranteed to be word atomic |
1440 // e.g., on x86, GCC may clear only %al when returning a bool false, but | 1487 // e.g., on x86, GCC may clear only %al when returning a bool false, but |
1441 // JVM takes the whole %eax as the return value, which may misinterpret | 1488 // JVM takes the whole %eax as the return value, which may misinterpret |
1442 // the return value as a boolean true. | 1489 // the return value as a boolean true. |
1443 | 1490 |
1444 assert(mirror != NULL, "should null-check on mirror before calling"); | 1491 assert(mirror != NULL, "should null-check on mirror before calling"); |
1445 klassOop k = java_lang_Class::as_klassOop(mirror); | 1492 Klass* k = java_lang_Class::as_Klass(mirror); |
1446 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0; | 1493 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0; |
1447 JRT_END | 1494 JRT_END |
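The is_instance_of change shows the mirror-to-metadata bridge in one line. A hedged recap:

```cpp
// Hedged recap: java_lang_Class::as_Klass replaces as_klassOop after
// NPG, mapping a java.lang.Class mirror oop to its native Klass*.
// It yields NULL for primitive-type mirrors, hence the NULL check.
Klass* k = java_lang_Class::as_Klass(mirror);
jint result = (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
```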
1448 | 1495 |
1449 | 1496 |
1450 #ifndef PRODUCT | 1497 #ifndef PRODUCT |