comparison src/share/vm/c1/c1_Runtime1.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 8f37087fc13f
children d8ce2825b193
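The central change visible in the hunks below: Klass and Method are no longer Java-heap objects (klassOopDesc, methodOopDesc) living in PermGen, but plain C++ Metadata subclasses allocated in native metaspace, so the C1 runtime entries take Klass* and Method* directly. A minimal stand-in sketch (simplified hypothetical types, not HotSpot code) of what that means for the rewritten asserts:

#include <cstdio>

struct Metadata {                       // stand-in for HotSpot's Metadata base class
  virtual bool is_klass() const { return false; }
  virtual ~Metadata() {}
};

struct Klass : Metadata {               // now an ordinary C++ object in metaspace
  const char* _name;
  explicit Klass(const char* n) : _name(n) {}
  bool is_klass() const override { return true; }
};

int main() {
  Klass k("java/lang/String");
  Metadata* m = &k;
  // assert(klass->is_klass(), ...) becomes plain virtual dispatch on a C++
  // object, replacing the old assert(oop(klass)->is_klass(), ...) that went
  // through a Java-heap oop header.
  std::printf("is_klass=%d\n", m->is_klass() ? 1 : 0);
}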
305 // Soft float adds more runtime names. 305 // Soft float adds more runtime names.
306 return pd_name_for_address(entry); 306 return pd_name_for_address(entry);
307 } 307 }
308 308
309 309
310 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, klassOopDesc* klass)) 310 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
311 NOT_PRODUCT(_new_instance_slowcase_cnt++;) 311 NOT_PRODUCT(_new_instance_slowcase_cnt++;)
312 312
313 assert(oop(klass)->is_klass(), "not a class"); 313 assert(klass->is_klass(), "not a class");
314 instanceKlassHandle h(thread, klass); 314 instanceKlassHandle h(thread, klass);
315 h->check_valid_for_instantiation(true, CHECK); 315 h->check_valid_for_instantiation(true, CHECK);
316 // make sure klass is initialized 316 // make sure klass is initialized
317 h->initialize(CHECK); 317 h->initialize(CHECK);
318 // allocate instance and return via TLS 318 // allocate instance and return via TLS
319 oop obj = h->allocate_instance(CHECK); 319 oop obj = h->allocate_instance(CHECK);
320 thread->set_vm_result(obj); 320 thread->set_vm_result(obj);
321 JRT_END 321 JRT_END
322 322
323 323
324 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, klassOopDesc* klass, jint length)) 324 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
325 NOT_PRODUCT(_new_type_array_slowcase_cnt++;) 325 NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
326 // Note: no handle for klass needed since they are not used 326 // Note: no handle for klass needed since they are not used
327 // anymore after new_typeArray() and no GC can happen before. 327 // anymore after new_typeArray() and no GC can happen before.
328 // (This may have to change if this code changes!) 328 // (This may have to change if this code changes!)
329 assert(oop(klass)->is_klass(), "not a class"); 329 assert(klass->is_klass(), "not a class");
330 BasicType elt_type = typeArrayKlass::cast(klass)->element_type(); 330 BasicType elt_type = typeArrayKlass::cast(klass)->element_type();
331 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK); 331 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
332 thread->set_vm_result(obj); 332 thread->set_vm_result(obj);
333 // This is pretty rare, but this runtime patch stresses deoptimization 333 // This is pretty rare, but this runtime patch stresses deoptimization
334 // if we deoptimize here, so force a deopt to stress the path. 334 // if we deoptimize here, so force a deopt to stress the path.
337 } 337 }
338 338
339 JRT_END 339 JRT_END
340 340
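The "no handle for klass needed" comments in these allocation entries rest on a general rule: a raw pointer into the Java heap is only safe while no GC can run; anything held across a possible safepoint must be wrapped in a handle so the collector can update it if the object moves. Klass* locals are exempt after this change because metaspace objects never move. A minimal sketch of the handle idea (simplified stand-in types, not the real Handle implementation):

struct oopDesc { int field; };
typedef oopDesc* oop;

// A handle is an extra indirection through a slot the GC knows about;
// re-reading through it after a collection yields the object's current
// (possibly moved) address, where a raw oop local would be left dangling.
struct Handle {
  oop* _slot;                           // registered in a thread handle area
  explicit Handle(oop* slot) : _slot(slot) {}
  oop operator()() const { return *_slot; }
};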
341 341
342 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, klassOopDesc* array_klass, jint length)) 342 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
343 NOT_PRODUCT(_new_object_array_slowcase_cnt++;) 343 NOT_PRODUCT(_new_object_array_slowcase_cnt++;)
344 344
345 // Note: no handle for klass needed since they are not used 345 // Note: no handle for klass needed since they are not used
346 // anymore after new_objArray() and no GC can happen before. 346 // anymore after new_objArray() and no GC can happen before.
347 // (This may have to change if this code changes!) 347 // (This may have to change if this code changes!)
348 assert(oop(array_klass)->is_klass(), "not a class"); 348 assert(array_klass->is_klass(), "not a class");
349 klassOop elem_klass = objArrayKlass::cast(array_klass)->element_klass(); 349 Klass* elem_klass = objArrayKlass::cast(array_klass)->element_klass();
350 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); 350 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
351 thread->set_vm_result(obj); 351 thread->set_vm_result(obj);
352 // This is pretty rare, but this runtime patch stresses deoptimization 352 // This is pretty rare, but this runtime patch stresses deoptimization
353 // if we deoptimize here, so force a deopt to stress the path. 353 // if we deoptimize here, so force a deopt to stress the path.
354 if (DeoptimizeALot) { 354 if (DeoptimizeALot) {
355 deopt_caller(); 355 deopt_caller();
356 } 356 }
357 JRT_END 357 JRT_END
358 358
359 359
360 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims)) 360 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
361 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;) 361 NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)
362 362
363 assert(oop(klass)->is_klass(), "not a class"); 363 assert(klass->is_klass(), "not a class");
364 assert(rank >= 1, "rank must be nonzero"); 364 assert(rank >= 1, "rank must be nonzero");
365 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); 365 oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
366 thread->set_vm_result(obj); 366 thread->set_vm_result(obj);
367 JRT_END 367 JRT_END
368 368
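new_multi_array above hands the rank and the dims vector from compiled code to arrayKlass::multi_allocate. Conceptually that is a simple recursion: allocate the outermost array, then fill each element with the next dimension. A self-contained sketch with stand-in types (not the HotSpot implementation), assuming rank >= 1 as the assert requires:

#include <vector>

struct JArray {
  int length;
  std::vector<JArray*> elems;           // stand-in for object-array element storage
  explicit JArray(int len) : length(len), elems(len, nullptr) {}
};

static JArray* multi_allocate(int rank, const int* dims) {
  JArray* a = new JArray(dims[0]);
  if (rank > 1) {
    for (int i = 0; i < dims[0]; i++)
      a->elems[i] = multi_allocate(rank - 1, dims + 1);  // recurse into next dimension
  }
  return a;
}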
381 381
382 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method 382 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
383 // associated with the top activation record. The inlinee (that is possibly included in the enclosing 383 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
384 // method) method oop is passed as an argument. In order to do that it is embedded in the code as 384 // method) method oop is passed as an argument. In order to do that it is embedded in the code as
385 // a constant. 385 // a constant.
386 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { 386 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
387 nmethod* osr_nm = NULL; 387 nmethod* osr_nm = NULL;
388 methodHandle method(THREAD, m); 388 methodHandle method(THREAD, m);
389 389
390 RegisterMap map(THREAD, false); 390 RegisterMap map(THREAD, false);
391 frame fr = THREAD->last_frame().sender(&map); 391 frame fr = THREAD->last_frame().sender(&map);
421 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD); 421 osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
422 assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions"); 422 assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
423 return osr_nm; 423 return osr_nm;
424 } 424 }
425 425
426 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method)) 426 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
427 nmethod* osr_nm; 427 nmethod* osr_nm;
428 JRT_BLOCK 428 JRT_BLOCK
429 osr_nm = counter_overflow_helper(thread, bci, method); 429 osr_nm = counter_overflow_helper(thread, bci, method);
430 if (osr_nm != NULL) { 430 if (osr_nm != NULL) {
431 RegisterMap map(thread, false); 431 RegisterMap map(thread, false);
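A stand-in sketch (hypothetical names, not the HotSpot API) of the flow the comment above describes: the overflow entry receives the inlinee Method* that C1 embedded as a constant, recovers the enclosing method from the top activation record, and asks the compilation policy whether an OSR nmethod is ready:

struct Method  { const char* name; };
struct nmethod { int entry_bci; };

// stand-in for CompilationPolicy::policy()->event(...): a real policy bumps
// profile counters and may trigger or return an on-stack-replacement compile
static nmethod* policy_event(Method* enclosing, Method* inlinee,
                             int branch_bci, int bci) {
  (void)enclosing; (void)inlinee; (void)branch_bci; (void)bci;
  return nullptr;                       // common case: nothing ready yet
}

static nmethod* counter_overflow(Method* enclosing, Method* inlinee,
                                 int branch_bci, int bci) {
  // if an OSR method comes back, the caller's frame is deoptimized so
  // execution can continue in the OSR code at the branch target
  return policy_event(enclosing, inlinee, branch_bci, bci);
}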
700 700
701 // Return to the now deoptimized frame. 701 // Return to the now deoptimized frame.
702 JRT_END 702 JRT_END
703 703
704 704
705 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { 705 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
706 Bytecode_field field_access(caller, bci); 706 Bytecode_field field_access(caller, bci);
707 // This can be static or non-static field access 707 // This can be static or non-static field access
708 Bytecodes::Code code = field_access.code(); 708 Bytecodes::Code code = field_access.code();
709 709
710 // We must load the class, initialize the class and resolve the field 710 // We must load the class, initialize the class and resolve the field
813 // this is used by assertions in the access_field_patching_id 813 // this is used by assertions in the access_field_patching_id
814 BasicType patch_field_type = T_ILLEGAL; 814 BasicType patch_field_type = T_ILLEGAL;
815 #endif // PRODUCT 815 #endif // PRODUCT
816 bool deoptimize_for_volatile = false; 816 bool deoptimize_for_volatile = false;
817 int patch_field_offset = -1; 817 int patch_field_offset = -1;
818 KlassHandle init_klass(THREAD, klassOop(NULL)); // klass needed by access_field_patching code 818 KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
819 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code 819 KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
820 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
821 bool load_klass_or_mirror_patch_id =
822 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
823
820 if (stub_id == Runtime1::access_field_patching_id) { 824 if (stub_id == Runtime1::access_field_patching_id) {
821 825
822 Bytecode_field field_access(caller_method, bci); 826 Bytecode_field field_access(caller_method, bci);
823 FieldAccessInfo result; // initialize class if needed 827 FieldAccessInfo result; // initialize class if needed
824 Bytecodes::Code code = field_access.code(); 828 Bytecodes::Code code = field_access.code();
837 deoptimize_for_volatile = result.access_flags().is_volatile(); 841 deoptimize_for_volatile = result.access_flags().is_volatile();
838 842
839 #ifndef PRODUCT 843 #ifndef PRODUCT
840 patch_field_type = result.field_type(); 844 patch_field_type = result.field_type();
841 #endif 845 #endif
842 } else if (stub_id == Runtime1::load_klass_patching_id) { 846 } else if (load_klass_or_mirror_patch_id) {
843 oop k; 847 Klass* k = NULL;
844 switch (code) { 848 switch (code) {
845 case Bytecodes::_putstatic: 849 case Bytecodes::_putstatic:
846 case Bytecodes::_getstatic: 850 case Bytecodes::_getstatic:
847 { klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK); 851 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
848 // Save a reference to the class that has to be checked for initialization
849 init_klass = KlassHandle(THREAD, klass); 852 init_klass = KlassHandle(THREAD, klass);
850 k = klass->java_mirror(); 853 mirror = Handle(THREAD, klass->java_mirror());
851 } 854 }
852 break; 855 break;
853 case Bytecodes::_new: 856 case Bytecodes::_new:
854 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci)); 857 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
855 k = caller_method->constants()->klass_at(bnew.index(), CHECK); 858 k = caller_method->constants()->klass_at(bnew.index(), CHECK);
870 k = caller_method->constants()->klass_at(cc.index(), CHECK); 873 k = caller_method->constants()->klass_at(cc.index(), CHECK);
871 } 874 }
872 break; 875 break;
873 case Bytecodes::_anewarray: 876 case Bytecodes::_anewarray:
874 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci)); 877 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
875 klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK); 878 Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
876 k = Klass::cast(ek)->array_klass(CHECK); 879 k = Klass::cast(ek)->array_klass(CHECK);
877 } 880 }
878 break; 881 break;
879 case Bytecodes::_ldc: 882 case Bytecodes::_ldc:
880 case Bytecodes::_ldc_w: 883 case Bytecodes::_ldc_w:
881 { 884 {
882 Bytecode_loadconstant cc(caller_method, bci); 885 Bytecode_loadconstant cc(caller_method, bci);
883 k = cc.resolve_constant(CHECK); 886 oop m = cc.resolve_constant(CHECK);
884 assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); 887 mirror = Handle(THREAD, m);
885 } 888 }
886 break; 889 break;
887 default: Unimplemented(); 890 default: Unimplemented();
888 } 891 }
889 // convert to handle 892 // convert to handle
890 load_klass = Handle(THREAD, k); 893 load_klass = KlassHandle(THREAD, k);
891 } else { 894 } else {
892 ShouldNotReachHere(); 895 ShouldNotReachHere();
893 } 896 }
894 897
895 if (deoptimize_for_volatile) { 898 if (deoptimize_for_volatile) {
911 // Return to the now deoptimized frame. 914 // Return to the now deoptimized frame.
912 } 915 }
913 916
914 // If we are patching in a non-perm oop, make sure the nmethod 917 // If we are patching in a non-perm oop, make sure the nmethod
915 // is on the right list. 918 // is on the right list.
916 if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) { 919 if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
917 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); 920 MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
918 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 921 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
919 guarantee(nm != NULL, "only nmethods can contain non-perm oops"); 922 guarantee(nm != NULL, "only nmethods can contain non-perm oops");
920 if (!nm->on_scavenge_root_list()) 923 if (!nm->on_scavenge_root_list())
921 CodeCache::add_scavenge_root_nmethod(nm); 924 CodeCache::add_scavenge_root_nmethod(nm);
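The hunk above keeps a compiled method on the scavenge-root list once a scavengable (young-generation) oop such as a class mirror is patched into it, so minor collections visit its embedded oops; metadata-only patches need no such registration. A minimal sketch of that bookkeeping with stand-in names (not the CodeCache API):

#include <vector>

struct CodeBlobStub { bool on_scavenge_root_list = false; };
static std::vector<CodeBlobStub*> scavenge_root_nmethods;

static void register_if_scavengable(CodeBlobStub* nm, bool oop_is_scavengable) {
  if (oop_is_scavengable && !nm->on_scavenge_root_list) {
    nm->on_scavenge_root_list = true;
    scavenge_root_nmethods.push_back(nm);  // young GCs scan these blobs' oops
  }
}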
976 // Set it now. 979 // Set it now.
977 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); 980 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
978 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type"); 981 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
979 assert(patch_field_offset >= 0, "illegal offset"); 982 assert(patch_field_offset >= 0, "illegal offset");
980 n_move->add_offset_in_bytes(patch_field_offset); 983 n_move->add_offset_in_bytes(patch_field_offset);
981 } else if (stub_id == Runtime1::load_klass_patching_id) { 984 } else if (load_klass_or_mirror_patch_id) {
982 // If a getstatic or putstatic is referencing a klass which 985 // If a getstatic or putstatic is referencing a klass which
983 // isn't fully initialized, the patch body isn't copied into 986 // isn't fully initialized, the patch body isn't copied into
984 // place until initialization is complete. In this case the 987 // place until initialization is complete. In this case the
985 // patch site is setup so that any threads besides the 988 // patch site is setup so that any threads besides the
986 // initializing thread are forced to come into the VM and 989 // initializing thread are forced to come into the VM and
987 // block. 990 // block.
988 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) || 991 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
989 instanceKlass::cast(init_klass())->is_initialized(); 992 InstanceKlass::cast(init_klass())->is_initialized();
990 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc); 993 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
991 if (jump->jump_destination() == being_initialized_entry) { 994 if (jump->jump_destination() == being_initialized_entry) {
992 assert(do_patch == true, "initialization must be complete at this point"); 995 assert(do_patch == true, "initialization must be complete at this point");
993 } else { 996 } else {
994 // patch the instruction <move reg, klass> 997 // patch the instruction <move reg, klass>
995 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); 998 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
996 999
997 assert(n_copy->data() == 0 || 1000 assert(n_copy->data() == 0 ||
998 n_copy->data() == (intptr_t)Universe::non_oop_word(), 1001 n_copy->data() == (intptr_t)Universe::non_oop_word(),
999 "illegal init value"); 1002 "illegal init value");
1003 if (stub_id == Runtime1::load_klass_patching_id) {
1000 assert(load_klass() != NULL, "klass not set"); 1004 assert(load_klass() != NULL, "klass not set");
1001 n_copy->set_data((intx) (load_klass())); 1005 n_copy->set_data((intx) (load_klass()));
1006 } else {
1007 assert(mirror() != NULL, "klass not set");
1008 n_copy->set_data((intx) (mirror()));
1009 }
1002 1010
1003 if (TracePatching) { 1011 if (TracePatching) {
1004 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); 1012 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1005 } 1013 }
1006 1014
1007 #if defined(SPARC) || defined(PPC) 1015 #if defined(SPARC) || defined(PPC)
1008 // Update the oop location in the nmethod with the proper 1016 // Update the location in the nmethod with the proper
1009 // oop. When the code was generated, a NULL was stuffed 1017 // metadata. When the code was generated, a NULL was stuffed
1010 // in the oop table and that table needs to be updated to 1018 // in the metadata table and that table needs to be updated to
1011 // have the right value. On intel the value is kept 1019 // have the right value. On intel the value is kept
1012 // directly in the instruction instead of in the oop 1020 // directly in the instruction instead of in the metadata
1013 // table, so set_data above effectively updated the value. 1021 // table, so set_data above effectively updated the value.
1014 nmethod* nm = CodeCache::find_nmethod(instr_pc); 1022 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1015 assert(nm != NULL, "invalid nmethod_pc"); 1023 assert(nm != NULL, "invalid nmethod_pc");
1016 RelocIterator oops(nm, copy_buff, copy_buff + 1); 1024 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1017 bool found = false; 1025 bool found = false;
1018 while (oops.next() && !found) { 1026 while (mds.next() && !found) {
1019 if (oops.type() == relocInfo::oop_type) { 1027 if (mds.type() == relocInfo::oop_type) {
1020 oop_Relocation* r = oops.oop_reloc(); 1028 assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
1029 oop_Relocation* r = mds.oop_reloc();
1021 oop* oop_adr = r->oop_addr(); 1030 oop* oop_adr = r->oop_addr();
1022 *oop_adr = load_klass(); 1031 *oop_adr = mirror();
1023 r->fix_oop_relocation(); 1032 r->fix_oop_relocation();
1033 found = true;
1034 } else if (mds.type() == relocInfo::metadata_type) {
1035 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1036 metadata_Relocation* r = mds.metadata_reloc();
1037 Metadata** metadata_adr = r->metadata_addr();
1038 *metadata_adr = load_klass();
1039 r->fix_metadata_relocation();
1024 found = true; 1040 found = true;
1025 } 1041 }
1026 } 1042 }
1027 assert(found, "the oop must exist!"); 1043 assert(found, "the metadata must exist!");
1028 #endif 1044 #endif
1029 1045
1030 } 1046 }
1031 } else { 1047 } else {
1032 ShouldNotReachHere(); 1048 ShouldNotReachHere();
1033 } 1049 }
1050
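The common thread through the relocation fixups above is that one patching routine now serves two kinds of slots: a mirror patch writes a Java-heap oop through an oop relocation, while a klass patch writes native metadata through the new metadata relocation. A simplified stand-in sketch of that dispatch (not the relocInfo API):

struct Metadata {};
struct oopDesc {};
enum RelocType { oop_type, metadata_type };

struct Reloc {
  RelocType type;
  union { oopDesc** oop_addr; Metadata** metadata_addr; } u;
};

static void apply_patch(Reloc& r, oopDesc* mirror, Metadata* klass) {
  if (r.type == oop_type) {
    *r.u.oop_addr = mirror;             // load_mirror_patching: Java-heap oop
  } else {
    *r.u.metadata_addr = klass;         // load_klass_patching: native metadata
  }
}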
1034 if (do_patch) { 1051 if (do_patch) {
1035 // replace instructions 1052 // replace instructions
1036 // first replace the tail, then the call 1053 // first replace the tail, then the call
1037 #ifdef ARM 1054 #ifdef ARM
1038 if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) { 1055 if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
1039 nmethod* nm = CodeCache::find_nmethod(instr_pc); 1056 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1040 oop* oop_addr = NULL; 1057 address addr = NULL;
1041 assert(nm != NULL, "invalid nmethod_pc"); 1058 assert(nm != NULL, "invalid nmethod_pc");
1042 RelocIterator oops(nm, copy_buff, copy_buff + 1); 1059 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1043 while (oops.next()) { 1060 while (mds.next()) {
1044 if (oops.type() == relocInfo::oop_type) { 1061 if (mds.type() == relocInfo::oop_type) {
1045 oop_Relocation* r = oops.oop_reloc(); 1062 assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
1046 oop_addr = r->oop_addr(); 1063 oop_Relocation* r = mds.oop_reloc();
1064 addr = (address)r->oop_addr();
1065 break;
1066 } else if (mds.type() == relocInfo::metadata_type) {
1067 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1068 metadata_Relocation* r = mds.metadata_reloc();
1069 addr = (address)r->metadata_addr();
1047 break; 1070 break;
1048 } 1071 }
1049 } 1072 }
1050 assert(oop_addr != NULL, "oop relocation must exist"); 1073 assert(addr != NULL, "metadata relocation must exist");
1051 copy_buff -= *byte_count; 1074 copy_buff -= *byte_count;
1052 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); 1075 NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1053 n_copy2->set_pc_relative_offset((address)oop_addr, instr_pc); 1076 n_copy2->set_pc_relative_offset(addr, instr_pc);
1054 } 1077 }
1055 #endif 1078 #endif
1056 1079
1057 for (int i = NativeCall::instruction_size; i < *byte_count; i++) { 1080 for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
1058 address ptr = copy_buff + i; 1081 address ptr = copy_buff + i;
1061 *(unsigned char*)dst = (unsigned char) a_byte; 1084 *(unsigned char*)dst = (unsigned char) a_byte;
1062 } 1085 }
1063 ICache::invalidate_range(instr_pc, *byte_count); 1086 ICache::invalidate_range(instr_pc, *byte_count);
1064 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); 1087 NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
1065 1088
1066 if (stub_id == Runtime1::load_klass_patching_id) { 1089 if (load_klass_or_mirror_patch_id) {
1067 // update relocInfo to oop 1090 relocInfo::relocType rtype =
1091 (stub_id == Runtime1::load_klass_patching_id) ?
1092 relocInfo::metadata_type :
1093 relocInfo::oop_type;
1094 // update relocInfo to metadata
1068 nmethod* nm = CodeCache::find_nmethod(instr_pc); 1095 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1069 assert(nm != NULL, "invalid nmethod_pc"); 1096 assert(nm != NULL, "invalid nmethod_pc");
1070 1097
1071 // The old patch site is now a move instruction so update 1098 // The old patch site is now a move instruction so update
1072 // the reloc info so that it will get updated during 1099 // the reloc info so that it will get updated during
1073 // future GCs. 1100 // future GCs.
1074 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1)); 1101 RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1075 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc, 1102 relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1076 relocInfo::none, relocInfo::oop_type); 1103 relocInfo::none, rtype);
1077 #ifdef SPARC 1104 #ifdef SPARC
1078 // Sparc takes two relocations for an oop so update the second one. 1105 // Sparc takes two relocations for a metadata reference so update the second one.
1079 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset; 1106 address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
1080 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); 1107 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1081 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, 1108 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1082 relocInfo::none, relocInfo::oop_type); 1109 relocInfo::none, rtype);
1083 #endif 1110 #endif
1084 #ifdef PPC 1111 #ifdef PPC
1085 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset; 1112 { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
1086 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1); 1113 RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
1087 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type); 1114 relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
1115 relocInfo::none, rtype);
1088 } 1116 }
1089 #endif 1117 #endif
1090 } 1118 }
1091 1119
1092 } else { 1120 } else {
1116 { 1144 {
1117 // Enter VM mode 1145 // Enter VM mode
1118 1146
1119 ResetNoHandleMark rnhm; 1147 ResetNoHandleMark rnhm;
1120 patch_code(thread, load_klass_patching_id); 1148 patch_code(thread, load_klass_patching_id);
1149 }
1150 // Back in JAVA, use no oops DON'T safepoint
1151
1152 // Return true if calling code is deoptimized
1153
1154 return caller_is_deopted();
1155 }
1156
1157 int Runtime1::move_mirror_patching(JavaThread* thread) {
1158 //
1159 // NOTE: we are still in Java
1160 //
1161 Thread* THREAD = thread;
1162 debug_only(NoHandleMark nhm;)
1163 {
1164 // Enter VM mode
1165
1166 ResetNoHandleMark rnhm;
1167 patch_code(thread, load_mirror_patching_id);
1121 } 1168 }
1122 // Back in JAVA, use no oops DON'T safepoint 1169 // Back in JAVA, use no oops DON'T safepoint
1123 1170
1124 // Return true if calling code is deoptimized 1171 // Return true if calling code is deoptimized
1125 1172
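move_klass_patching and the new move_mirror_patching above share one shape: still in Java state, briefly enter the VM to run the shared patch_code routine with the right stub id, then report whether the caller was deoptimized in the meantime so the assembly stub can return safely. A sketch of that shape with hypothetical stand-ins (the real transition is done by the VM state machinery, not a bare scope):

enum StubId { access_field_patching_id, load_klass_patching_id,
              load_mirror_patching_id };

static bool caller_deopted = false;     // stand-in for per-thread deopt state

static void patch_code_stub(StubId id) { (void)id; /* perform the patch */ }

static int move_patching(StubId id) {
  {
    // transition to VM state; handles may be created inside this scope only
    patch_code_stub(id);
  }
  // back in Java state: no oops may be touched and no safepoint taken
  return caller_deopted ? 1 : 0;        // the assembly stub checks this result
}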
1185 bs->write_ref_array_pre(dst_addr, length); 1232 bs->write_ref_array_pre(dst_addr, length);
1186 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); 1233 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1187 bs->write_ref_array((HeapWord*)dst_addr, length); 1234 bs->write_ref_array((HeapWord*)dst_addr, length);
1188 return ac_ok; 1235 return ac_ok;
1189 } else { 1236 } else {
1190 klassOop bound = objArrayKlass::cast(dst->klass())->element_klass(); 1237 Klass* bound = objArrayKlass::cast(dst->klass())->element_klass();
1191 klassOop stype = objArrayKlass::cast(src->klass())->element_klass(); 1238 Klass* stype = objArrayKlass::cast(src->klass())->element_klass();
1192 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { 1239 if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
1193 // Elements are guaranteed to be subtypes, so no check necessary 1240 // Elements are guaranteed to be subtypes, so no check necessary
1194 bs->write_ref_array_pre(dst_addr, length); 1241 bs->write_ref_array_pre(dst_addr, length);
1195 Copy::conjoint_oops_atomic(src_addr, dst_addr, length); 1242 Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
1196 bs->write_ref_array((HeapWord*)dst_addr, length); 1243 bs->write_ref_array((HeapWord*)dst_addr, length);
1212 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed; 1259 if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
1213 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed; 1260 if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
1214 1261
1215 if (length == 0) return ac_ok; 1262 if (length == 0) return ac_ok;
1216 if (src->is_typeArray()) { 1263 if (src->is_typeArray()) {
1217 const klassOop klass_oop = src->klass(); 1264 Klass* const klass_oop = src->klass();
1218 if (klass_oop != dst->klass()) return ac_failed; 1265 if (klass_oop != dst->klass()) return ac_failed;
1219 typeArrayKlass* klass = typeArrayKlass::cast(klass_oop); 1266 typeArrayKlass* klass = typeArrayKlass::cast(klass_oop);
1220 const int l2es = klass->log2_element_size(); 1267 const int l2es = klass->log2_element_size();
1221 const int ihs = klass->array_header_in_bytes() / wordSize; 1268 const int ihs = klass->array_header_in_bytes() / wordSize;
1222 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es); 1269 char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
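The arraycopy fast path above does its bounds checks in unsigned arithmetic. Given non-negative positions and length (which the compiled stub establishes before calling in), computing pos + length as unsigned keeps the sum exact instead of letting a signed overflow wrap to a negative value that would slip past the comparison. A small self-contained illustration (not the HotSpot code):

// assumes pos >= 0 && len >= 0, as the callers ensure
static bool copy_range_ok(int array_length, int pos, int len) {
  // unsigned arithmetic keeps pos + len exact: both operands are below 2^31,
  // so the sum cannot wrap, and an out-of-range request always fails the test
  return (unsigned int)pos + (unsigned int)len <= (unsigned int)array_length;
}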
1277 // e.g., on x86, GCC may clear only %al when returning a bool false, but 1324 // e.g., on x86, GCC may clear only %al when returning a bool false, but
1278 // JVM takes the whole %eax as the return value, which may misinterpret 1325 // JVM takes the whole %eax as the return value, which may misinterpret
1279 // the return value as a boolean true. 1326 // the return value as a boolean true.
1280 1327
1281 assert(mirror != NULL, "should null-check on mirror before calling"); 1328 assert(mirror != NULL, "should null-check on mirror before calling");
1282 klassOop k = java_lang_Class::as_klassOop(mirror); 1329 Klass* k = java_lang_Class::as_Klass(mirror);
1283 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0; 1330 return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
1284 JRT_END 1331 JRT_END
1285 1332
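The comment above records a real ABI pitfall: GCC may write only %al when returning a bool, while the JVM's compiled caller reads all of %eax, so stale upper bits can turn false into true. Returning an int written as a full 0 or 1 sidesteps it; a sketch of the pattern with a stand-in test:

// returning int (not bool) forces the full return register to be written,
// so a caller that reads all 32 bits sees exactly 0 or 1
static int is_instance_of_stub(const void* mirror, const void* obj) {
  const bool ok = (mirror != nullptr && obj != nullptr);  // stand-in for obj->is_a(k)
  return ok ? 1 : 0;
}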
1286 1333
1287 #ifndef PRODUCT 1334 #ifndef PRODUCT