# HG changeset patch
# User roland
# Date 1377084885 -7200
# Node ID f98f5d48f511f3397c3448392b73c3e02bb105b3
# Parent  b17d8f6d9ed7e5819bbec9b3911c2e9652655653
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
Summary: Do patching rather than bailing out for unlinked call with appendix
Reviewed-by: twisti, kvn

diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -307,7 +307,7 @@
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -520,7 +520,7 @@
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -804,6 +804,12 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/x86/vm/c1_CodeStubs_x86.cpp
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -402,6 +402,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -362,7 +362,7 @@
 
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/cpu/x86/vm/c1_Runtime1_x86.cpp
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -1499,6 +1499,13 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_CodeStubs.hpp
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -364,7 +364,8 @@
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@
       }
       NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
       n_move->set_offset(field_offset);
-    } else if (_id == load_klass_id || _id == load_mirror_id) {
+    } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
       assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
       // verify that we're pointing at a NativeMovConstReg
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_GraphBuilder.cpp
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -1667,9 +1667,8 @@
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
 
-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }
 
   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1744,8 @@
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1850,7 +1863,8 @@
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic ||
@@ -1901,7 +1915,7 @@
                      code == Bytecodes::_invokespecial   ||
                      code == Bytecodes::_invokevirtual   ||
                      code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
 
   int vtable_index = Method::invalid_vtable_index;
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_LIR.hpp
--- a/src/share/vm/c1/c1_LIR.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -1211,8 +1211,6 @@
   bool is_invokedynamic() const { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_LIRAssembler.cpp
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -93,12 +93,23 @@
       default:
         ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }
 
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
 
 //---------------------------------------------------------------
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_LIRAssembler.hpp
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -119,6 +119,8 @@
 
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
 
+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_Runtime1.cpp
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -819,6 +819,7 @@
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id ||
      stub_id == Runtime1::load_mirror_patching_id);
@@ -888,10 +889,32 @@
             mirror = Handle(THREAD, m);
           }
           break;
-        default: Unimplemented();
+        default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
       }
       // convert to handle
       load_klass = KlassHandle(THREAD, k);
+    } else if (stub_id == load_appendix_patching_id) {
+      Bytecode_invoke bytecode(caller_method, bci);
+      Bytecodes::Code bc = bytecode.invoke_code();
+
+      CallInfo info;
+      constantPoolHandle pool(thread, caller_method->constants());
+      int index = bytecode.index();
+      LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+      appendix = info.resolved_appendix();
+      switch (bc) {
+        case Bytecodes::_invokehandle: {
+          int cache_index = ConstantPool::decode_cpcache_index(index, true);
+          assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+          pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+          break;
+        }
+        case Bytecodes::_invokedynamic: {
+          pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+          break;
+        }
+        default: fatal("unexpected bytecode for load_appendix_patching_id");
+      }
     } else {
       ShouldNotReachHere();
     }
@@ -992,8 +1015,8 @@
                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
                  "illegal init value");
           if (stub_id == Runtime1::load_klass_patching_id) {
-          assert(load_klass() != NULL, "klass not set");
-          n_copy->set_data((intx) (load_klass()));
+            assert(load_klass() != NULL, "klass not set");
+            n_copy->set_data((intx) (load_klass()));
           } else {
             assert(mirror() != NULL, "klass not set");
             n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@
           if (TracePatching) {
             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
           }
+        }
+      } else if (stub_id == Runtime1::load_appendix_patching_id) {
+        NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+        assert(n_copy->data() == 0 ||
+               n_copy->data() == (intptr_t)Universe::non_oop_word(),
+               "illegal init value");
+        n_copy->set_data((intx) (appendix()));
 
-#if defined(SPARC) || defined(PPC)
-          // Update the location in the nmethod with the proper
-          // metadata.  When the code was generated, a NULL was stuffed
-          // in the metadata table and that table needs to be update to
-          // have the right value.  On intel the value is kept
-          // directly in the instruction instead of in the metadata
-          // table, so set_data above effectively updated the value.
-          nmethod* nm = CodeCache::find_nmethod(instr_pc);
-          assert(nm != NULL, "invalid nmethod_pc");
-          RelocIterator mds(nm, copy_buff, copy_buff + 1);
-          bool found = false;
-          while (mds.next() && !found) {
-            if (mds.type() == relocInfo::oop_type) {
-              assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-              oop_Relocation* r = mds.oop_reloc();
-              oop* oop_adr = r->oop_addr();
-              *oop_adr = mirror();
-              r->fix_oop_relocation();
-              found = true;
-            } else if (mds.type() == relocInfo::metadata_type) {
-              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-              metadata_Relocation* r = mds.metadata_reloc();
-              Metadata** metadata_adr = r->metadata_addr();
-              *metadata_adr = load_klass();
-              r->fix_metadata_relocation();
-              found = true;
-            }
-          }
-          assert(found, "the metadata must exist!");
-#endif
-
+        if (TracePatching) {
+          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
         }
       } else {
         ShouldNotReachHere();
       }
+
+#if defined(SPARC) || defined(PPC)
+      if (load_klass_or_mirror_patch_id ||
+          stub_id == Runtime1::load_appendix_patching_id) {
+        // Update the location in the nmethod with the proper
+        // metadata.  When the code was generated, a NULL was stuffed
+        // in the metadata table and that table needs to be update to
+        // have the right value.  On intel the value is kept
+        // directly in the instruction instead of in the metadata
+        // table, so set_data above effectively updated the value.
+        nmethod* nm = CodeCache::find_nmethod(instr_pc);
+        assert(nm != NULL, "invalid nmethod_pc");
+        RelocIterator mds(nm, copy_buff, copy_buff + 1);
+        bool found = false;
+        while (mds.next() && !found) {
+          if (mds.type() == relocInfo::oop_type) {
+            assert(stub_id == Runtime1::load_mirror_patching_id ||
+                   stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+            oop_Relocation* r = mds.oop_reloc();
+            oop* oop_adr = r->oop_addr();
+            *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+            r->fix_oop_relocation();
+            found = true;
+          } else if (mds.type() == relocInfo::metadata_type) {
+            assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+            metadata_Relocation* r = mds.metadata_reloc();
+            Metadata** metadata_adr = r->metadata_addr();
+            *metadata_adr = load_klass();
+            r->fix_metadata_relocation();
+            found = true;
+          }
+        }
+        assert(found, "the metadata must exist!");
+      }
+#endif
       if (do_patch) {
         // replace instructions
         // first replace the tail, then the call
@@ -1077,7 +1112,8 @@
 
         ICache::invalidate_range(instr_pc, *byte_count);
         NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-        if (load_klass_or_mirror_patch_id) {
+        if (load_klass_or_mirror_patch_id ||
+            stub_id == Runtime1::load_appendix_patching_id) {
           relocInfo::relocType rtype =
             (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@
 
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_Runtime1.hpp
--- a/src/share/vm/c1/c1_Runtime1.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_Runtime1.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -67,6 +67,7 @@
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
 
   static void patch_code(JavaThread* thread, StubID stub_id);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_globals.cpp
--- a/src/share/vm/c1/c1_globals.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_globals.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/c1/c1_globals.hpp
--- a/src/share/vm/c1/c1_globals.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/c1/c1_globals.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -54,7 +54,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
 \
   /* Printing */                                                            \
   notproduct(bool, PrintC1Statistics, false,                                \
@@ -333,15 +333,19 @@
           "Use CHA and exact type results at call sites when updating MDOs")\
                                                                             \
   product(bool, C1UpdateMethodData, trueInTiered,                           \
-          "Update MethodData*s in Tier1-generated code")                    \
+          "Update MethodData*s in Tier1-generated code")                    \
                                                                             \
   develop(bool, PrintCFGToFile, false,                                      \
           "print control flow graph to a separate file during compilation") \
                                                                             \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                              \
+             "Patch invokedynamic appendix not known at compile time")      \
+                                                                            \
+                                                                            \
 
 // Read default values for c1 globals
 
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/ci/ciEnv.cpp
--- a/src/share/vm/ci/ciEnv.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -1150,6 +1150,10 @@
   record_method_not_compilable("out of memory");
 }
 
+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
 void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/ci/ciEnv.hpp
--- a/src/share/vm/ci/ciEnv.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -400,6 +400,7 @@
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();
 
   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note: To find a class from its name string, use ciSymbol::make,
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/ci/ciMethod.hpp
--- a/src/share/vm/ci/ciMethod.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/ci/ciMethod.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -177,6 +177,10 @@
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/ci/ciObjectFactory.cpp
--- a/src/share/vm/ci/ciObjectFactory.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -563,7 +563,10 @@
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }
 
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
 
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/ci/ciObjectFactory.hpp
--- a/src/share/vm/ci/ciObjectFactory.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -131,6 +131,8 @@
 
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
 
+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/runtime/globals.cpp
--- a/src/share/vm/runtime/globals.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/runtime/globals.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -205,6 +205,7 @@
 
 #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
 #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -260,7 +261,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/runtime/globals_extension.hpp
--- a/src/share/vm/runtime/globals_extension.hpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/runtime/globals_extension.hpp	Wed Aug 21 13:34:45 2013 +0200
@@ -57,6 +57,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
@@ -99,7 +100,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@
 
 #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)  /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)      /* flag is constant */
@@ -204,6 +206,7 @@
           C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
 #ifdef COMPILER2
diff -r b17d8f6d9ed7 -r f98f5d48f511 src/share/vm/runtime/sharedRuntime.cpp
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Aug 23 18:04:35 2013 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Aug 21 13:34:45 2013 +0200
@@ -1051,7 +1051,8 @@
 
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@
 
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@
 #endif
 
   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                      is_optimized, static_bound, virtual_call_info,
                      CHECK_(methodHandle()));
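A note on the mechanics, for readers working through the patch: the pivotal piece is the new LIR_Assembler::patching_id(), which inspects the raw bytecode behind the patch site and selects the load_appendix stub whenever the call can carry a JSR 292 appendix (invokedynamic/invokehandle), falling back to the pre-existing load_mirror stub otherwise. The following is a minimal standalone sketch of that dispatch, not HotSpot code; the enum values and helper functions are simplified stand-ins for Bytecodes::Code, PatchingStub::PatchID, Bytecodes::has_optional_appendix(), and patching_id().

#include <cstdio>

// Simplified stand-ins for Bytecodes::Code and PatchingStub::PatchID.
enum Bytecode { _invokestatic, _invokevirtual, _invokedynamic, _invokehandle };
enum PatchID  { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };

// Models Bytecodes::has_optional_appendix(): only invokedynamic and
// invokehandle may push an appendix (CallSite, MethodType, ...) argument.
static bool has_optional_appendix(Bytecode bc) {
  return bc == _invokedynamic || bc == _invokehandle;
}

// Models LIR_Assembler::patching_id(): emit the appendix-patching stub for
// appendix-carrying call sites, the mirror-patching stub for everything else.
static PatchID patching_id(Bytecode bc_raw) {
  return has_optional_appendix(bc_raw) ? load_appendix_id : load_mirror_id;
}

int main() {
  printf("invokedynamic -> %s\n",
         patching_id(_invokedynamic) == load_appendix_id ? "load_appendix_id"
                                                         : "load_mirror_id");
  printf("invokevirtual -> %s\n",
         patching_id(_invokevirtual) == load_appendix_id ? "load_appendix_id"
                                                         : "load_mirror_id");
  return 0;
}

At run time the emitted stub calls back into Runtime1::move_appendix_patching(), which resolves the call site through LinkResolver::resolve_invoke(), records the result in the constant pool cache, and patches the resolved appendix oop into the nmethod. Since C1PatchInvokeDynamic is a diagnostic flag defaulting to true, the old bailout behavior remains reachable for troubleshooting via -XX:+UnlockDiagnosticVMOptions -XX:-C1PatchInvokeDynamic.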