# HG changeset patch
# User jmasa
# Date 1304112981 25200
# Node ID df0a92a7e30bf0e7e6e368dd5d2c187c60e15534
# Parent  c6033dad9fd3847f0ac17e36741e111b9df15636
# Parent  286c498ae0d4a66cf07b4b6e1efb99c99acf662b
Merge

diff -r c6033dad9fd3 -r df0a92a7e30b src/cpu/sparc/vm/methodHandles_sparc.cpp
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -486,7 +486,7 @@
   if (ek == _invokespecial_mh) {
     // Must load & check the first argument before entering the target method.
     __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
-    __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+    __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
     __ null_check(G3_method_handle);
     __ verify_oop(G3_method_handle);
   }
diff -r c6033dad9fd3 -r df0a92a7e30b src/cpu/sparc/vm/templateTable_sparc.cpp
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -3293,8 +3293,6 @@
                              /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
-  __ verify_oop(G5_callsite);
-
   // profile this call
   __ profile_call(O4);
@@ -3307,8 +3305,10 @@
   __ sll(Rret, LogBytesPerWord, Rret);
   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
+  __ verify_oop(G5_callsite);
   __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
   __ null_check(G3_method_handle);
+  __ verify_oop(G3_method_handle);
   // Adjust Rret first so Llast_SP can be same as Rret
   __ add(Rret, -frame::pc_return_offset, O7);
diff -r c6033dad9fd3 -r df0a92a7e30b src/cpu/x86/vm/templateTable_x86_32.cpp
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -422,7 +422,7 @@
   Label L_done, L_throw_exception;
   const Register con_klass_temp = rcx;  // same as Rcache
-  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(con_klass_temp, rax);
   __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   __ jcc(Assembler::notEqual, L_done);
   __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
@@ -432,7 +432,7 @@
   // Load the exception from the system-array which wraps it:
   __ bind(L_throw_exception);
-  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
   __ bind(L_done);
@@ -946,9 +946,9 @@
   __ jcc(Assembler::zero, is_null);
   // Move subklass into EBX
-  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rax);
   // Move superklass into EAX
-  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, rdx);
   __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
   // Compress array+index*wordSize+12 into a single register.  Frees ECX.
   __ lea(rdx, element_address);
@@ -2001,7 +2001,7 @@
   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
     assert(state == vtos, "only valid state");
     __ movptr(rax, aaddress(0));
-    __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(rdi, rax);
     __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
     Label skip_register_finalizer;
@@ -2948,7 +2948,7 @@
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
   // Keep recv in rcx for callee expects it there
-  __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, recv);
   __ verify_oop(rax);
   // profile this call
@@ -3028,7 +3028,7 @@
   // Get receiver klass into rdx - also a null check
   __ restore_locals();  // restore rdi
-  __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rcx);
   __ verify_oop(rdx);
   // profile this call
@@ -3083,6 +3083,7 @@
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
+  assert(byte_no == f1_oop, "use this argument");
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3095,7 +3096,6 @@
     return;
   }
-  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
   // rax: CallSite object (f1)
@@ -3106,14 +3106,14 @@
   Register rax_callsite      = rax;
   Register rcx_method_handle = rcx;
-  if (ProfileInterpreter) {
-    // %%% should make a type profile for any invokedynamic that takes a ref argument
-    // profile this call
-    __ profile_call(rsi);
-  }
-
-  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
+  // %%% should make a type profile for any invokedynamic that takes a ref argument
+  // profile this call
+  __ profile_call(rsi);
+
+  __ verify_oop(rax_callsite);
+  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   __ null_check(rcx_method_handle);
+  __ verify_oop(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
@@ -3258,7 +3258,7 @@
               (int32_t)markOopDesc::prototype()); // header
     __ pop(rcx);   // get saved klass back in the register.
   }
-  __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx);  // klass
+  __ store_klass(rax, rcx);  // klass
   {
     SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
@@ -3333,7 +3333,7 @@
   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
   __ bind(resolved);
-  __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rdx);
   // Generate subtype check.  Blows ECX.  Resets EDI.  Object in EDX.
   // Superklass in EAX.  Subklass in EBX.
@@ -3376,12 +3376,12 @@
   __ push(atos);
   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
   __ pop_ptr(rdx);
-  __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rdx);
   __ jmp(resolved);
   // Get superklass in EAX and subklass in EDX
   __ bind(quicked);
-  __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rax);
   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
   __ bind(resolved);
diff -r c6033dad9fd3 -r df0a92a7e30b src/cpu/x86/vm/templateTable_x86_64.cpp
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -436,7 +436,7 @@
   Label L_done, L_throw_exception;
   const Register con_klass_temp = rcx;  // same as cache
   const Register array_klass_temp = rdx;  // same as index
-  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(con_klass_temp, rax);
   __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
   __ jcc(Assembler::notEqual, L_done);
@@ -447,7 +447,7 @@
   // Load the exception from the system-array which wraps it:
   __ bind(L_throw_exception);
-  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
   __ bind(L_done);
@@ -3137,7 +3137,6 @@
     return;
   }
-  assert(byte_no == f1_oop, "use this argument");
   prepare_invoke(rax, rbx, byte_no);
   // rax: CallSite object (f1)
@@ -3148,14 +3147,14 @@
   Register rax_callsite      = rax;
   Register rcx_method_handle = rcx;
-  if (ProfileInterpreter) {
-    // %%% should make a type profile for any invokedynamic that takes a ref argument
-    // profile this call
-    __ profile_call(r13);
-  }
-
-  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
+  // %%% should make a type profile for any invokedynamic that takes a ref argument
+  // profile this call
+  __ profile_call(r13);
+
+  __ verify_oop(rax_callsite);
+  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   __ null_check(rcx_method_handle);
+  __ verify_oop(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
diff -r c6033dad9fd3 -r df0a92a7e30b src/cpu/x86/vm/vm_version_x86.cpp
--- a/src/cpu/x86/vm/vm_version_x86.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -441,12 +441,25 @@
       }
     }
-    // On family 21 processors default is no sw prefetch
-    if ( cpu_family() == 21 ) {
+    // some defaults for AMD family 15h
+    if ( cpu_family() == 0x15 ) {
+      // On family 15h processors default is no sw prefetch
       if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
         AllocatePrefetchStyle = 0;
       }
+      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
+      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
+        AllocatePrefetchInstr = 3;
+      }
+      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
+      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
+        UseXMMForArrayCopy = true;
+      }
+      if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
+        UseUnalignedLoadStores = true;
+      }
     }
+  }
   if( is_intel() ) { // Intel cpus specific settings
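Note on the vm_version_x86.cpp hunk above: `21` and `0x15` are the same number, so the guard itself is unchanged; the hunk merely restates the family in hex and adds further family-15h defaults (PREFETCHW as the prefetch instruction, XMM-based array copy, unaligned load/stores). For readers who wonder where that family number comes from, the following standalone sketch shows the usual CPUID display-family computation; it is an illustration only, not HotSpot's cpu_family() accessor.

#include <stdint.h>

// Standalone sketch (not HotSpot code): how the "display family" reported
// by CPUID leaf 1, EAX is usually derived.  AMD family 15h parts encode a
// base family of 0xF plus an extended family of 0x06, so 0xF + 0x6 == 0x15,
// i.e. decimal 21.
static uint32_t display_family(uint32_t cpuid1_eax) {
  uint32_t base_family = (cpuid1_eax >> 8)  & 0xF;
  uint32_t ext_family  = (cpuid1_eax >> 20) & 0xFF;
  return (base_family == 0xF) ? base_family + ext_family : base_family;
}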
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/ci/bcEscapeAnalyzer.cpp
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -232,14 +232,7 @@
   }
   // compute size of arguments
-  int arg_size = target->arg_size();
-  if (code == Bytecodes::_invokedynamic) {
-    assert(!target->is_static(), "receiver explicit in method");
-    arg_size--;  // implicit, not really on stack
-  }
-  if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
-    arg_size--;
-  }
+  int arg_size = target->invoke_arg_size(code);
   int arg_base = MAX2(state._stack_height - arg_size, 0);
   // direct recursive calls are skipped if they can be bound statically without introducing
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/ci/ciEnv.cpp
--- a/src/share/vm/ci/ciEnv.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/ci/ciEnv.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -756,7 +756,7 @@
     assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
     bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
-    if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
+    if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
       // FIXME: code generation could allow for null (unlinked) call site
       is_resolved = false;
@@ -770,7 +770,7 @@
     // Get the invoker methodOop from the constant pool.
     oop f1_value = cpool->cache()->main_entry_at(index)->f1();
-    methodOop signature_invoker = methodOop(f1_value);
+    methodOop signature_invoker = (methodOop) f1_value;
     assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
            "correct result from LinkResolver::resolve_invokedynamic");
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/ci/ciMethod.hpp
--- a/src/share/vm/ci/ciMethod.hpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/ci/ciMethod.hpp  Fri Apr 29 14:36:21 2011 -0700
@@ -127,7 +127,24 @@
   ciSignature* signature() const                 { return _signature; }
   ciType*      return_type() const               { return _signature->return_type(); }
   int          arg_size_no_receiver() const      { return _signature->size(); }
-  int          arg_size() const                  { return _signature->size() + (_flags.is_static() ? 0 : 1); }
+  // Can only be used on loaded ciMethods
+  int          arg_size() const                  {
+    check_is_loaded();
+    return _signature->size() + (_flags.is_static() ? 0 : 1);
+  }
+  // Report the number of elements on stack when invoking this method.
+  // This is different than the regular arg_size because invokdynamic
+  // has an implicit receiver.
+  int invoke_arg_size(Bytecodes::Code code) const {
+    int arg_size = _signature->size();
+    // Add a receiver argument, maybe:
+    if (code != Bytecodes::_invokestatic &&
+        code != Bytecodes::_invokedynamic) {
+      arg_size++;
+    }
+    return arg_size;
+  }
+
   // Method code and related information.
   address code()                                 { if (_code == NULL) load_code(); return _code; }
@@ -276,9 +293,9 @@
   void print_short_name(outputStream* st = tty);
   methodOop get_method_handle_target() {
-    klassOop receiver_limit_oop = NULL;
-    int flags = 0;
-    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
+    KlassHandle receiver_limit; int flags = 0;
+    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
+    return m();
   }
 };
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/ci/ciObject.cpp
--- a/src/share/vm/ci/ciObject.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/ci/ciObject.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -194,6 +194,16 @@
 // ciObject::should_be_constant()
 bool ciObject::should_be_constant() {
   if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
+  if (!JavaObjectsInPerm && !is_null_object()) {
+    // We want Strings and Classes to be embeddable by default since
+    // they used to be in the perm world.  Not all Strings used to be
+    // embeddable but there's no easy way to distinguish the interned
+    // from the regulars ones so just treat them all that way.
+    ciEnv* env = CURRENT_ENV;
+    if (klass() == env->String_klass() || klass() == env->Class_klass()) {
+      return true;
+    }
+  }
   return handle() == NULL || !is_scavengable();
 }
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/memory/genOopClosures.hpp
--- a/src/share/vm/memory/genOopClosures.hpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/memory/genOopClosures.hpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,7 +175,7 @@
 protected:
   template <class T> inline void do_oop_work(T* p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
-    guarantee(obj->is_oop_or_null(), "invalid oop");
+    guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, obj));
   }
 public:
   virtual void do_oop(oop* p);
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/oops/cpCacheOop.cpp
--- a/src/share/vm/oops/cpCacheOop.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/oops/cpCacheOop.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -104,7 +104,7 @@
   void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
   bool success = (result == NULL);
   if (success) {
-    update_barrier_set(f1_addr, f1);
+    update_barrier_set((void*) f1_addr, f1);
   }
 }
@@ -275,21 +275,23 @@
   return (int) bsm_cache_index;
 }
-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
-                                              methodHandle signature_invoker) {
+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
   assert(is_secondary_entry(), "");
+  // NOTE: it's important that all other values are set before f1 is
+  // set since some users short circuit on f1 being set
+  // (i.e. non-null) and that may result in uninitialized values for
+  // other racing threads (e.g. flags).
   int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
-  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
-  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
-    // racing threads might be trying to install their own favorites
-    set_f1(call_site());
-  }
+  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
   bool is_final = true;
   assert(signature_invoker->is_final_method(), "is_final");
-  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
+  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
+  assert(_flags == 0 || _flags == flags, "flags should be the same");
+  set_flags(flags);
   // do not do set_bytecode on a secondary CP cache entry
   //set_bytecode_1(Bytecodes::_invokedynamic);
+  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
 }
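The NOTE added to ConstantPoolCacheEntry::set_dynamic_call above is the heart of this hunk: readers that test f1 (for example the new is_f1_null() check in the ciEnv.cpp hunk earlier) treat a non-null f1 as "this secondary entry is fully linked", so every other field must be written before f1 is CAS-published by set_f1_if_null_atomic(). The sketch below shows the same publish-last idiom with std::atomic in place of HotSpot's Atomic/OrderAccess primitives; the Entry/publish/is_ready names are illustrative, not HotSpot's.

#include <atomic>

// Publish-last idiom (sketch).  All dependent state is initialized first;
// the guard field is CAS'd in last, with release/acquire ordering, so a
// racing reader that sees a non-null guard also sees the earlier writes.
struct Entry {
  int flags;                         // written before publication
  std::atomic<void*> f1;             // non-null means "fully initialized"

  Entry() : flags(0), f1(nullptr) {}

  void publish(void* call_site, int new_flags) {
    flags = new_flags;               // 1. set everything else
    void* expected = nullptr;        // 2. CAS f1 in as the very last step;
    f1.compare_exchange_strong(expected, call_site,   //    losers keep the winner's value
                               std::memory_order_release);
  }

  bool is_ready() const {            // readers test only f1
    return f1.load(std::memory_order_acquire) != nullptr;
  }
};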
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/escape.cpp
--- a/src/share/vm/opto/escape.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/escape.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1437,7 +1437,10 @@
   //  Update the memory inputs of MemNodes with the value we computed
   //  in Phase 2 and move stores memory users to corresponding memory slices.
-#ifdef ASSERT
+
+  // Disable memory split verification code until the fix for 6984348.
+  // Currently it produces false negative results since it does not cover all cases.
+#if 0 // ifdef ASSERT
   visited.Reset();
   Node_Stack old_mems(arena, _compile->unique() >> 2);
 #endif
@@ -1447,7 +1450,7 @@
     Node *n = ptnode_adr(i)->_node;
     assert(n != NULL, "sanity");
     if (n->is_Mem()) {
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
       Node* old_mem = n->in(MemNode::Memory);
       if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
@@ -1469,13 +1472,13 @@
      }
    }
  }
-#ifdef ASSERT
+#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
-    assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
+    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
 #endif
 }
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/graphKit.cpp
--- a/src/share/vm/opto/graphKit.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/graphKit.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1033,14 +1033,10 @@
       iter.reset_to_bci(bci());
       iter.next();
       ciMethod* method = iter.get_method(ignore);
-      inputs = method->arg_size_no_receiver();
-      // Add a receiver argument, maybe:
-      if (code != Bytecodes::_invokestatic &&
-          code != Bytecodes::_invokedynamic)
-        inputs += 1;
       // (Do not use ciMethod::arg_size(), because
       // it might be an unloaded method, which doesn't
       // know whether it is static or not.)
+      inputs = method->invoke_arg_size(code);
       int size = method->return_type()->size();
       depth = size - inputs;
     }
@@ -2957,8 +2953,7 @@
 //---------------------------set_output_for_allocation-------------------------
 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
-                                          const TypeOopPtr* oop_type,
-                                          bool raw_mem_only) {
+                                          const TypeOopPtr* oop_type) {
   int rawidx = Compile::AliasIdxRaw;
   alloc->set_req( TypeFunc::FramePtr, frameptr() );
   add_safepoint_edges(alloc);
@@ -2982,7 +2977,7 @@
                                                  rawoop)->as_Initialize();
   assert(alloc->initialization() == init,  "2-way macro link must work");
   assert(init ->allocation()     == alloc, "2-way macro link must work");
-  if (ReduceFieldZeroing && !raw_mem_only) {
+  {
     // Extract memory strands which may participate in the new object's
     // initialization, and source them from the new InitializeNode.
     // This will allow us to observe initializations when they occur,
@@ -3043,11 +3038,9 @@
 // the type to a constant.
 // The optional arguments are for specialized use by intrinsics:
 //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
-//  - If 'raw_mem_only', do not cast the result to an oop.
 //  - If 'return_size_val', report the the total object size to the caller.
 Node* GraphKit::new_instance(Node* klass_node,
                              Node* extra_slow_test,
-                             bool raw_mem_only, // affect only raw memory
                              Node* *return_size_val) {
   // Compute size in doublewords
   // The size is always an integral number of doublewords, represented
@@ -3118,7 +3111,7 @@
                                          size, klass_node,
                                          initial_slow_test);
-  return set_output_for_allocation(alloc, oop_type, raw_mem_only);
+  return set_output_for_allocation(alloc, oop_type);
 }
 //-------------------------------new_array-------------------------------------
@@ -3128,7 +3121,6 @@
 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                           Node* length,         // number of array elements
                           int   nargs,          // number of arguments to push back for uncommon trap
-                          bool raw_mem_only,    // affect only raw memory
                           Node* *return_size_val) {
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
@@ -3273,7 +3265,7 @@
     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
   }
-  Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
+  Node* javaoop = set_output_for_allocation(alloc, ary_type);
   // Cast length on remaining path to be as narrow as possible
   if (map()->find_edge(length) >= 0) {
@@ -3462,9 +3454,22 @@
   // Get the alias_index for raw card-mark memory
   int adr_type = Compile::AliasIdxRaw;
+  Node*   zero = __ ConI(0); // Dirty card value
+  BasicType bt = T_BYTE;
+
+  if (UseCondCardMark) {
+    // The classic GC reference write barrier is typically implemented
+    // as a store into the global card mark table.  Unfortunately
+    // unconditional stores can result in false sharing and excessive
+    // coherence traffic as well as false transactional aborts.
+    // UseCondCardMark enables MP "polite" conditional card mark
+    // stores.  In theory we could relax the load from ctrl() to
+    // no_ctrl, but that doesn't buy much latitude.
+    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
+    __ if_then(card_val, BoolTest::ne, zero);
+  }
+
   // Smash zero into card
-  Node*   zero = __ ConI(0);
-  BasicType bt = T_BYTE;
   if( !UseConcMarkSweepGC ) {
     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
   } else {
@@ -3472,6 +3477,10 @@
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
   }
+  if (UseCondCardMark) {
+    __ end_if();
+  }
+
   // Final sync IdealKit and GraphKit.
   final_sync(ideal);
 }
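The write_barrier_post change above is the code-generation half of the new UseCondCardMark flag (declared in the globals.hpp hunk further down): when the flag is on, the dirty-card store is wrapped in a load-and-test so an already-dirty card is not rewritten, which avoids false sharing and needless coherence traffic on the card table; the macro.cpp hunk below folds that extra card load to a constant when such a barrier is eliminated. As a small C-style function the emitted barrier amounts to roughly the following sketch; the names are illustrative, not HotSpot identifiers.

#include <stdint.h>

enum { DIRTY_CARD = 0 };  // dirty value used by this sketch

// Shape of the generated post-barrier for one card-table entry.
static inline void post_write_barrier(volatile uint8_t* card, int use_cond_card_mark) {
  if (use_cond_card_mark && *card == DIRTY_CARD)
    return;             // card already dirty: skip the store (and the cache-line ping-pong)
  *card = DIRTY_CARD;   // default path: unconditionally dirty the card
}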
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/graphKit.hpp
--- a/src/share/vm/opto/graphKit.hpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/graphKit.hpp  Fri Apr 29 14:36:21 2011 -0700
@@ -773,15 +773,13 @@
   // implementation of object creation
   Node* set_output_for_allocation(AllocateNode* alloc,
-                                  const TypeOopPtr* oop_type,
-                                  bool raw_mem_only);
+                                  const TypeOopPtr* oop_type);
   Node* get_layout_helper(Node* klass_node, jint& constant_value);
   Node* new_instance(Node* klass_node,
                      Node* slow_test = NULL,
-                     bool raw_mem_only = false,
                      Node* *return_size_val = NULL);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
-                  bool raw_mem_only = false, Node* *return_size_val = NULL);
+                  Node* *return_size_val = NULL);
   // Handy for making control flow
   IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/library_call.cpp
--- a/src/share/vm/opto/library_call.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/library_call.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -3527,8 +3527,7 @@
     Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
     Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
-    const bool raw_mem_only = true;
-    newcopy = new_array(klass_node, length, 0, raw_mem_only);
+    newcopy = new_array(klass_node, length, 0);
     // Generate a direct call to the right arraycopy function(s).
     // We know the copy is disjoint but we might not know if the
@@ -4325,8 +4324,6 @@
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
   int raw_adr_idx = Compile::AliasIdxRaw;
-  const bool raw_mem_only = true;
-
   Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
   if (array_ctl != NULL) {
@@ -4335,8 +4332,7 @@
     set_control(array_ctl);
     Node* obj_length = load_array_length(obj);
     Node* obj_size = NULL;
-    Node* alloc_obj = new_array(obj_klass, obj_length, 0,
-                                raw_mem_only, &obj_size);
+    Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
     if (!use_ReduceInitialCardMarks()) {
       // If it is an oop array, it requires very special treatment,
@@ -4408,7 +4404,7 @@
     // It's an instance, and it passed the slow-path tests.
     PreserveJVMState pjvms(this);
     Node* obj_size = NULL;
-    Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
+    Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
     copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/loopopts.cpp
--- a/src/share/vm/opto/loopopts.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/loopopts.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -2262,6 +2262,9 @@
 //                  stmt1
 //                    |
 //                    v
+//              loop predicate
+//                    |
+//                    v
 //                  stmt2 clone
 //                    |
 //                    v
@@ -2272,9 +2275,6 @@
 //         :  false   true
 //         :  |       |
 //         :  |       v
-//         :  | loop predicate
-//         :  |       |
-//         :  |       v
 //         :  |    newloop<-----+
 //         :  |        |        |
 //         :  |     stmt3 clone |
@@ -2330,7 +2330,6 @@
     }
   }
-  Node* entry = head->in(LoopNode::EntryControl);
   int dd = dom_depth(head);
   // Step 1: find cut point
@@ -2627,8 +2626,6 @@
   // Backedge of the surviving new_head (the clone) is original last_peel
   _igvn.hash_delete(new_head_clone);
-  Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl));
-  new_head_clone->set_req(LoopNode::EntryControl, new_entry);
   new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
   _igvn._worklist.push(new_head_clone);
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/macro.cpp
--- a/src/share/vm/opto/macro.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/macro.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -221,9 +221,16 @@
       Node *shift = p2x->unique_out();
       Node *addp = shift->unique_out();
       for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
-        Node *st = addp->last_out(j);
-        assert(st->is_Store(), "store required");
-        _igvn.replace_node(st, st->in(MemNode::Memory));
+        Node *mem = addp->last_out(j);
+        if (UseCondCardMark && mem->is_Load()) {
+          assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
+          // The load is checking if the card has been written so
+          // replace it with zero to fold the test.
+          _igvn.replace_node(mem, intcon(0));
+          continue;
+        }
+        assert(mem->is_Store(), "store required");
+        _igvn.replace_node(mem, mem->in(MemNode::Memory));
       }
     } else {
       // G1 pre/post barriers
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/memnode.cpp
--- a/src/share/vm/opto/memnode.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/memnode.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1259,15 +1259,18 @@
     return NULL; // Wait stable graph
   }
   uint cnt = mem->req();
-  for( uint i = 1; i < cnt; i++ ) {
+  for (uint i = 1; i < cnt; i++) {
+    Node* rc = region->in(i);
+    if (rc == NULL || phase->type(rc) == Type::TOP)
+      return NULL; // Wait stable graph
     Node *in = mem->in(i);
-    if( in == NULL ) {
+    if (in == NULL) {
      return NULL; // Wait stable graph
    }
   }
   // Check for loop invariant.
   if (cnt == 3) {
-    for( uint i = 1; i < cnt; i++ ) {
+    for (uint i = 1; i < cnt; i++) {
       Node *in = mem->in(i);
       Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
       if (m == mem) {
@@ -1281,38 +1284,37 @@
   // Do nothing here if Identity will find a value
   // (to avoid infinite chain of value phis generation).
-  if ( !phase->eqv(this, this->Identity(phase)) )
+  if (!phase->eqv(this, this->Identity(phase)))
     return NULL;
   // Skip the split if the region dominates some control edge of the address.
-  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
+  if (!MemNode::all_controls_dominate(address, region))
     return NULL;
   const Type* this_type = this->bottom_type();
   int this_index  = phase->C->get_alias_index(addr_t);
   int this_offset = addr_t->offset();
   int this_iid    = addr_t->is_oopptr()->instance_id();
-  int wins = 0;
   PhaseIterGVN *igvn = phase->is_IterGVN();
   Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
-  for( uint i = 1; i < region->req(); i++ ) {
+  for (uint i = 1; i < region->req(); i++) {
     Node *x;
     Node* the_clone = NULL;
-    if( region->in(i) == phase->C->top() ) {
+    if (region->in(i) == phase->C->top()) {
       x = phase->C->top();      // Dead path?  Use a dead data op
     } else {
       x = this->clone();        // Else clone up the data op
       the_clone = x;            // Remember for possible deletion.
       // Alter data node to use pre-phi inputs
-      if( this->in(0) == region ) {
-        x->set_req( 0, region->in(i) );
+      if (this->in(0) == region) {
+        x->set_req(0, region->in(i));
       } else {
-        x->set_req( 0, NULL );
+        x->set_req(0, NULL);
       }
-      for( uint j = 1; j < this->req(); j++ ) {
+      for (uint j = 1; j < this->req(); j++) {
         Node *in = this->in(j);
-        if( in->is_Phi() && in->in(0) == region )
-          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
+        if (in->is_Phi() && in->in(0) == region)
+          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
       }
     }
     // Check for a 'win' on some paths
@@ -1321,12 +1323,11 @@
     bool singleton = t->singleton();
     // See comments in PhaseIdealLoop::split_thru_phi().
-    if( singleton && t == Type::TOP ) {
+    if (singleton && t == Type::TOP) {
       singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
     }
-    if( singleton ) {
-      wins++;
+    if (singleton) {
       x = igvn->makecon(t);
     } else {
       // We now call Identity to try to simplify the cloned node.
@@ -1340,13 +1341,11 @@
       // igvn->type(x) is set to x->Value() already.
       x->raise_bottom_type(t);
       Node *y = x->Identity(igvn);
-      if( y != x ) {
-        wins++;
+      if (y != x) {
        x = y;
      } else {
        y = igvn->hash_find(x);
-        if( y ) {
-          wins++;
+        if (y) {
          x = y;
        } else {
          // Else x is a new node we are keeping
@@ -1360,13 +1359,9 @@
       igvn->remove_dead_node(the_clone);
     phi->set_req(i, x);
   }
-  if( wins > 0 ) {
-    // Record Phi
-    igvn->register_new_node_with_optimizer(phi);
-    return phi;
-  }
-  igvn->remove_dead_node(phi);
-  return NULL;
+  // Record Phi
+  igvn->register_new_node_with_optimizer(phi);
+  return phi;
 }
 //------------------------------Ideal------------------------------------------
@@ -1677,14 +1672,15 @@
   // If we are loading from a freshly-allocated object, produce a zero,
   // if the load is provably beyond the header of the object.
   // (Also allow a variable load from a fresh array to produce zero.)
-  if (ReduceFieldZeroing) {
+  const TypeOopPtr *tinst = tp->isa_oopptr();
+  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
+  if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem,phase);
     if (value != NULL && value->is_Con())
       return value->bottom_type();
   }
-  const TypeOopPtr *tinst = tp->isa_oopptr();
-  if (tinst != NULL && tinst->is_known_instance_field()) {
+  if (is_instance) {
     // If we have an instance type and our memory input is the
     // programs's initial memory state, there is no matching store,
     // so just return a zero of the appropriate type
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/opto/stringopts.cpp
--- a/src/share/vm/opto/stringopts.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/opto/stringopts.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1172,16 +1172,16 @@
 Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
   Node* string = str;
-  Node* offset = kit.make_load(NULL,
+  Node* offset = kit.make_load(kit.control(),
                                kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
                                TypeInt::INT, T_INT, offset_field_idx);
-  Node* count = kit.make_load(NULL,
+  Node* count = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
                               TypeInt::INT, T_INT, count_field_idx);
   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
-  Node* value = kit.make_load(NULL,
+  Node* value = kit.make_load(kit.control(),
                               kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
                               value_type, T_OBJECT, value_field_idx);
@@ -1342,7 +1342,7 @@
       }
       //  Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
       //                               TypeInt::INT, T_INT, offset_field_idx);
-      Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+      Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
                                   TypeInt::INT, T_INT, count_field_idx);
       length = __ AddI(length, count);
       string_sizes->init_req(argi, NULL);
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/prims/methodHandleWalk.cpp
--- a/src/share/vm/prims/methodHandleWalk.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -82,10 +82,8 @@
 void MethodHandleChain::set_last_method(oop target, TRAPS) {
   _is_last = true;
-  klassOop receiver_limit_oop = NULL;
-  int flags = 0;
-  methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
-  _last_method = methodHandle(THREAD, m);
+  KlassHandle receiver_limit; int flags = 0;
+  _last_method = MethodHandles::decode_method(target, receiver_limit, flags);
   if ((flags & MethodHandles::_dmf_has_receiver) == 0)
     _last_invoke = Bytecodes::_invokestatic;
   else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/prims/methodHandles.cpp
--- a/src/share/vm/prims/methodHandles.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/prims/methodHandles.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -153,9 +153,9 @@
 // and local, like parse a data structure.  For speed, such methods work on plain
 // oops, not handles.  Trapping methods uniformly operate on handles.
-methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
-                                         klassOop& receiver_limit_result, int& decode_flags_result) {
-  if (vmtarget == NULL)  return NULL;
+methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
+                                            KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  if (vmtarget == NULL)  return methodHandle();
   assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding");
   if (vmindex < 0) {
     // this DMH performs no dispatch; it is directly bound to a methodOop
@@ -198,20 +198,20 @@
 // MemberName and DirectMethodHandle have the same linkage to the JVM internals.
 // (MemberName is the non-operational name used for queries and setup.)
-methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh);
   int vmindex  = java_lang_invoke_DirectMethodHandle::vmindex(mh);
   oop mtype    = java_lang_invoke_DirectMethodHandle::type(mh);
   return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result);
 }
-methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), "");
   assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), "");
   for (oop bmh = mh;;) {
     // Bound MHs can be stacked to bind several arguments.
     oop target = java_lang_invoke_MethodHandle::vmtarget(bmh);
-    if (target == NULL)  return NULL;
+    if (target == NULL)  return methodHandle();
     decode_flags_result |= MethodHandles::_dmf_binds_argument;
     klassOop tk = target->klass();
     if (tk == SystemDictionary::BoundMethodHandle_klass()) {
@@ -236,14 +236,14 @@
   }
 }
-methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), "");
   for (oop amh = mh;;) {
     // Adapter MHs can be stacked to convert several arguments.
     int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh));
     decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK;
     oop target = java_lang_invoke_MethodHandle::vmtarget(amh);
-    if (target == NULL)  return NULL;
+    if (target == NULL)  return methodHandle();
     klassOop tk = target->klass();
     if (tk == SystemDictionary::AdapterMethodHandle_klass()) {
       amh = target;
@@ -255,8 +255,8 @@
   }
 }
-methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
-  if (mh == NULL)  return NULL;
+methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  if (mh == NULL)  return methodHandle();
   klassOop mhk = mh->klass();
   assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle");
   if (mhk == SystemDictionary::DirectMethodHandle_klass()) {
@@ -270,7 +270,7 @@
     return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result);
   } else {
     assert(false, "cannot parse this MH");
-    return NULL;                // random MH?
+    return methodHandle();      // random MH?
   }
 }
@@ -299,9 +299,9 @@
 // A trusted party is handing us a cookie to determine a method.
 // Let's boil it down to the method oop they really want.
-methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   decode_flags_result = 0;
-  receiver_limit_result = NULL;
+  receiver_limit_result = KlassHandle();
   klassOop xk = x->klass();
   if (xk == Universe::methodKlassObj()) {
     return decode_methodOop((methodOop) x, decode_flags_result);
@@ -329,7 +329,7 @@
     assert(!x->is_method(), "already checked");
     assert(!java_lang_invoke_MemberName::is_instance(x), "already checked");
   }
-  return NULL;
+  return methodHandle();
 }
@@ -389,11 +389,10 @@
     int offset = instanceKlass::cast(k)->offset_from_fields(slot);
     init_MemberName(mname_oop, k, accessFlags_from(mods), offset);
   } else {
-    int decode_flags = 0; klassOop receiver_limit = NULL;
-    methodOop m = MethodHandles::decode_method(target_oop,
-                                               receiver_limit, decode_flags);
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags);
     bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
-    init_MemberName(mname_oop, m, do_dispatch);
+    init_MemberName(mname_oop, m(), do_dispatch);
   }
 }
@@ -423,13 +422,14 @@
 }
-methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) {
+methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
+  methodHandle empty;
   int flags  = java_lang_invoke_MemberName::flags(mname);
-  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return NULL;  // not invocable
+  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return empty;  // not invocable
   oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
   int vmindex  = java_lang_invoke_MemberName::vmindex(mname);
-  if (vmindex == VM_INDEX_UNINITIALIZED)  return NULL;  // not resolved
-  methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
+  if (vmindex == VM_INDEX_UNINITIALIZED)  return empty;  // not resolved
+  methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
   oop clazz = java_lang_invoke_MemberName::clazz(mname);
   if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
     klassOop klass = java_lang_Class::as_klassOop(clazz);
@@ -439,9 +439,7 @@
 }
 // convert the external string or reflective type to an internal signature
-Symbol* MethodHandles::convert_to_signature(oop type_str,
-                                            bool polymorphic,
-                                            TRAPS) {
+Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) {
   if (java_lang_invoke_MethodType::is_instance(type_str)) {
     return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL);
   } else if (java_lang_Class::is_instance(type_str)) {
@@ -474,48 +472,48 @@
 #endif
   if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED)
     return;  // already resolved
-  oop defc_oop = java_lang_invoke_MemberName::clazz(mname());
-  oop name_str = java_lang_invoke_MemberName::name(mname());
-  oop type_str = java_lang_invoke_MemberName::type(mname());
-  int flags    = java_lang_invoke_MemberName::flags(mname());
+  Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname()));
+  Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname()));
+  Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname()));
+  int flags      = java_lang_invoke_MemberName::flags(mname());
-  if (defc_oop == NULL || name_str == NULL || type_str == NULL) {
+  if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve");
   }
-  klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop);
-  defc_oop = NULL;  // safety
-  if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
-  if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
-    if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
-    defc_klassOop = SystemDictionary::Object_klass();
+
+  instanceKlassHandle defc;
+  {
+    klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop());
+    if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
+    if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
+      if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
+      defc_klassOop = SystemDictionary::Object_klass();
+    }
+    defc = instanceKlassHandle(THREAD, defc_klassOop);
   }
-  instanceKlassHandle defc(THREAD, defc_klassOop);
-  defc_klassOop = NULL;  // safety
   if (defc.is_null()) {
     THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class");
   }
-  defc->link_class(CHECK);
+  defc->link_class(CHECK);  // possible safepoint
   // convert the external string name to an internal symbol
-  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str);
+  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str());
   if (name == NULL)  return;  // no such name
-  name_str = NULL;  // safety
   Handle polymorphic_method_type;
   bool polymorphic_signature = false;
   if ((flags & ALL_KINDS) == IS_METHOD &&
       (defc() == SystemDictionary::MethodHandle_klass() &&
-       methodOopDesc::is_method_handle_invoke_name(name)))
+       methodOopDesc::is_method_handle_invoke_name(name))) {
     polymorphic_signature = true;
+  }
   // convert the external string or reflective type to an internal signature
-  TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK);
-  if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) {
-    polymorphic_method_type = Handle(THREAD, type_str);  //preserve exactly
+  TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK);
+  if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) {
+    polymorphic_method_type = type_str;  // preserve exactly
   }
-
   if (type == NULL)  return;  // no such signature exists in the VM
-  type_str = NULL;  // safety
   // Time to do the lookup.
   switch (flags & ALL_KINDS) {
@@ -560,8 +558,8 @@
       java_lang_invoke_MemberName::set_vmtarget(mname(),  vmtarget);
       java_lang_invoke_MemberName::set_vmindex(mname(),   vmindex);
       java_lang_invoke_MemberName::set_modifiers(mname(), mods);
-      DEBUG_ONLY(int junk; klassOop junk2);
-      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
+      DEBUG_ONLY(KlassHandle junk1; int junk2);
+      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
              "properly stored for later decoding");
       return;
     }
@@ -589,8 +587,8 @@
       java_lang_invoke_MemberName::set_vmtarget(mname(),  vmtarget);
       java_lang_invoke_MemberName::set_vmindex(mname(),   vmindex);
       java_lang_invoke_MemberName::set_modifiers(mname(), mods);
-      DEBUG_ONLY(int junk; klassOop junk2);
-      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
+      DEBUG_ONLY(KlassHandle junk1; int junk2);
+      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
             "properly stored for later decoding");
       return;
     }
@@ -677,16 +675,14 @@
   case IS_METHOD:
   case IS_CONSTRUCTOR:
     {
-      klassOop receiver_limit = NULL;
-      int decode_flags = 0;
-      methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL,
-                                             receiver_limit, decode_flags));
+      KlassHandle receiver_limit; int decode_flags = 0;
+      methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags);
       if (m.is_null())  break;
       if (!have_defc) {
         klassOop defc = m->method_holder();
-        if (receiver_limit != NULL && receiver_limit != defc
-            && Klass::cast(receiver_limit)->is_subtype_of(defc))
-          defc = receiver_limit;
+        if (receiver_limit.not_null() && receiver_limit() != defc
+            && Klass::cast(receiver_limit())->is_subtype_of(defc))
+          defc = receiver_limit();
         java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror());
       }
       if (!have_name) {
@@ -884,10 +880,9 @@
   //  - AMH can have methodOop for static invoke with bound receiver
   //  - DMH can have methodOop for static invoke (on variable receiver)
   //  - DMH can have klassOop for dispatched (non-static) invoke
-  klassOop receiver_limit = NULL;
-  int decode_flags = 0;
-  methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
-  if (m == NULL)  return NULL;
+  KlassHandle receiver_limit; int decode_flags = 0;
+  methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
+  if (m.is_null())  return NULL;
   switch (format) {
   case ETF_REFLECT_METHOD:
     // same as jni_ToReflectedMethod:
@@ -903,10 +898,10 @@
       if (SystemDictionary::MemberName_klass() == NULL)  break;
       instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass());
       mname_klass->initialize(CHECK_NULL);
-      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);
+      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);  // possible safepoint
       java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED);
       bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
-      init_MemberName(mname(), m, do_dispatch);
+      init_MemberName(mname(), m(), do_dispatch);
       expand_MemberName(mname, 0, CHECK_NULL);
       return mname();
     }
@@ -1459,8 +1454,8 @@
   // that links the interpreter calls to the method.  We need the same
   // bits, and will use the same calling sequence code.
-  int vmindex = methodOopDesc::garbage_vtable_index;
-  oop vmtarget = NULL;
+  int    vmindex = methodOopDesc::garbage_vtable_index;
+  Handle vmtarget;
   instanceKlass::cast(m->method_holder())->link_class(CHECK);
@@ -1478,7 +1473,7 @@
   } else if (!do_dispatch || m->can_be_statically_bound()) {
     // We are simulating an invokestatic or invokespecial instruction.
     // Set up the method pointer, just like ConstantPoolCacheEntry::set_method().
-    vmtarget = m();
+    vmtarget = m;
     // this does not help dispatch, but it will make it possible to parse this MH:
     vmindex = methodOopDesc::nonvirtual_vtable_index;
     assert(vmindex < 0, "(>=0) == do_dispatch");
@@ -1490,7 +1485,7 @@
       // For a DMH, it is done now, when the handle is created.
       Klass* k = Klass::cast(m->method_holder());
       if (k->should_be_initialized()) {
-        k->initialize(CHECK);
+        k->initialize(CHECK);  // possible safepoint
       }
     }
   } else {
@@ -1504,10 +1499,10 @@
   if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
-  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget);
-  java_lang_invoke_DirectMethodHandle::set_vmindex(mh(),  vmindex);
-  DEBUG_ONLY(int flags; klassOop rlimit);
-  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(),
+  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget());
+  java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex);
+  DEBUG_ONLY(KlassHandle rlimit; int flags);
+  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m,
         "properly stored for later decoding");
   DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0));
   assert(!(actual_do_dispatch && !do_dispatch),
@@ -1523,10 +1518,13 @@
                                                           methodHandle m,
                                                           TRAPS) {
   // Verify type.
-  oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
+  KlassHandle bound_recv_type;
+  {
+    oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
+    if (receiver != NULL)
+      bound_recv_type = KlassHandle(THREAD, receiver->klass());
+  }
   Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
-  KlassHandle bound_recv_type;
-  if (receiver != NULL)  bound_recv_type = KlassHandle(THREAD, receiver->klass());
   verify_method_type(m, mtype, true, bound_recv_type, CHECK);
   int receiver_pos = m->size_of_parameters() - 1;
@@ -1573,8 +1571,8 @@
   java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m());
-  DEBUG_ONLY(int junk; klassOop junk2);
-  assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding");
+  DEBUG_ONLY(KlassHandle junk1; int junk2);
+  assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding");
   assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot");
   // Done!
@@ -1682,8 +1680,11 @@
   }
   // Get bound type and required slots.
-  oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
-  BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
+  BasicType ptype;
+  {
+    oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
+    ptype = java_lang_Class::as_BasicType(ptype_oop);
+  }
   int slots_pushed = type2size[ptype];
   // If (a) the target is a direct non-dispatched method handle,
@@ -1694,13 +1695,12 @@
   if (OptimizeMethodHandles &&
       target->klass() == SystemDictionary::DirectMethodHandle_klass() &&
       (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) {
-    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
-    methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = decode_method(target(), receiver_limit, decode_flags);
    if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
    DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
    assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
    if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
-      KlassHandle receiver_limit(THREAD, receiver_limit_oop);
      init_BoundMethodHandle_with_receiver(mh, m,
                                           receiver_limit,
                                           decode_flags,
                                           CHECK);
@@ -2019,7 +2019,6 @@
 }
 void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) {
-  oop argument    = java_lang_invoke_AdapterMethodHandle::argument(mh());
   int argslot     = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
   jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
   jint conv_op    = adapter_conversion_op(conversion);
@@ -2215,18 +2214,14 @@
   // which method are we really talking about?
   if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
-  oop target_oop = JNIHandles::resolve_non_null(target_jh);
-  if (java_lang_invoke_MemberName::is_instance(target_oop) &&
-      java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) {
-    Handle mname(THREAD, target_oop);
-    MethodHandles::resolve_MemberName(mname, CHECK);
-    target_oop = mname();  // in case of GC
+  Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
+  if (java_lang_invoke_MemberName::is_instance(target()) &&
+      java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) {
+    MethodHandles::resolve_MemberName(target, CHECK);
   }
-  int decode_flags = 0; klassOop receiver_limit = NULL;
-  methodHandle m(THREAD,
-                 MethodHandles::decode_method(target_oop,
-                                              receiver_limit, decode_flags));
+  KlassHandle receiver_limit; int decode_flags = 0;
+  methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
   if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); }
   // The trusted Java code that calls this method should already have performed
@@ -2284,12 +2279,8 @@
     // Target object is a reflective method. (%%% Do we need this alternate path?)
     Untested("init_BMH of non-MH");
     if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); }
-    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
-    methodHandle m(THREAD,
-                   MethodHandles::decode_method(target(),
-                                                receiver_limit_oop,
-                                                decode_flags));
-    KlassHandle receiver_limit(THREAD, receiver_limit_oop);
+    KlassHandle receiver_limit; int decode_flags = 0;
+    methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
     MethodHandles::init_BoundMethodHandle_with_receiver(mh, m,
                                                         receiver_limit,
                                                         decode_flags,
@@ -2424,12 +2415,12 @@
 #ifndef PRODUCT
   if (which >= 0 && which < con_value_count) {
     int con = con_values[which];
-    objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh);
-    if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
+    objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh));
+    if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
      const char* str = &con_names[0];
      for (int i = 0; i < which; i++)
        str += strlen(str) + 1;   // skip name and null
-      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);
+      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);  // possible safepoint
      box->obj_at_put(0, name);
    }
    return con;
@@ -2486,10 +2477,10 @@
                                jclass clazz_jh, jstring name_jh, jstring sig_jh,
                                int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) {
   if (clazz_jh == NULL || results_jh == NULL)  return -1;
-  klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh));
+  KlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)));
-  objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh);
-  if (results == NULL || !results->is_objArray())  return -1;
+  objArrayHandle results(THREAD, (objArrayOop) JNIHandles::resolve(results_jh));
+  if (results.is_null() || !results->is_objArray())  return -1;
   TempNewSymbol name = NULL;
   TempNewSymbol sig = NULL;
@@ -2502,20 +2493,20 @@
     if (sig == NULL)  return 0; // a match is not possible
   }
-  klassOop caller = NULL;
+  KlassHandle caller;
   if (caller_jh != NULL) {
     oop caller_oop = JNIHandles::resolve_non_null(caller_jh);
     if (!java_lang_Class::is_instance(caller_oop))  return -1;
-    caller = java_lang_Class::as_klassOop(caller_oop);
+    caller = KlassHandle(THREAD, java_lang_Class::as_klassOop(caller_oop));
   }
-  if (name != NULL && sig != NULL && results != NULL) {
+  if (name != NULL && sig != NULL && results.not_null()) {
     // try a direct resolve
     // %%% TO DO
   }
-  int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags,
-                                            caller, skip, results);
+  int res = MethodHandles::find_MemberNames(k(), name, sig, mflags,
+                                            caller(), skip, results());
   // TO DO: expand at least some of the MemberNames, to avoid massive callbacks
   return res;
 }
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/prims/methodHandles.hpp
--- a/src/share/vm/prims/methodHandles.hpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/prims/methodHandles.hpp  Fri Apr 29 14:36:21 2011 -0700
@@ -265,13 +265,13 @@
   static inline address from_interpreted_entry(EntryKind ek);
   // helpers for decode_method.
-  static methodOop decode_methodOop(methodOop m, int& decode_flags_result);
-  static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
-  static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
+  static methodOop    decode_methodOop(methodOop m, int& decode_flags_result);
+  static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
   // Find out how many stack slots an mh pushes or pops.
   // The result is *not* reported as a multiple of stack_move_unit();
@@ -317,7 +317,7 @@
     _dmf_adapter_lsb    = 0x20,
     _DMF_ADAPTER_MASK   = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb
   };
-  static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result);
+  static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result);
   enum {
     // format of query to getConstant:
     GC_JVM_PUSH_LIMIT = 0,
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/runtime/globals.hpp  Fri Apr 29 14:36:21 2011 -0700
@@ -620,6 +620,9 @@
   product(bool, UseSSE42Intrinsics, false,                                  \
           "SSE4.2 versions of intrinsics")                                  \
                                                                             \
+  product(bool, UseCondCardMark, false,                                     \
+          "Check for already marked card before updating card table")      \
+                                                                            \
   develop(bool, TraceCallFixup, false,                                      \
           "traces all call fixups")                                         \
                                                                             \
diff -r c6033dad9fd3 -r df0a92a7e30b src/share/vm/runtime/sharedRuntime.cpp
--- a/src/share/vm/runtime/sharedRuntime.cpp  Fri Apr 29 12:33:39 2011 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp  Fri Apr 29 14:36:21 2011 -0700
@@ -1721,14 +1721,14 @@
       targetArity = ArgumentCount(target->signature()).size();
     }
   }
-  klassOop kignore; int dmf_flags = 0;
-  methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
+  KlassHandle kignore; int dmf_flags = 0;
+  methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
   if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
                      MethodHandles::_dmf_does_dispatch |
                      MethodHandles::_dmf_from_interface)) != 0)
-    actual_method = NULL;  // MH does extra binds, drops, etc.
+    actual_method = methodHandle();  // MH does extra binds, drops, etc.
   bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
-  if (actual_method != NULL) {
+  if (actual_method.not_null()) {
     mhName = actual_method->signature()->as_C_string();
     mhArity = ArgumentCount(actual_method->signature()).size();
     if (!actual_method->is_static())  mhArity += 1;
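The bulk of the methodHandles, ci and sharedRuntime changes in this merge follow one pattern: raw methodOop/klassOop locals and return values are replaced by methodHandle/KlassHandle (and bare oops by Handle), because several of these paths now cross calls marked "// possible safepoint", where a GC may move the underlying objects. The toy program below illustrates why a raw pointer held across such a call is unsafe while a handle is not; every name in it is made up for the illustration and nothing here is HotSpot code.

#include <cassert>
#include <vector>

struct Obj { int payload; };

// Toy runtime: a "safepoint" may relocate the object.  Raw pointers taken
// before the safepoint dangle afterwards; handles are slots in a root table
// that the relocation step patches, so they remain valid.
struct ToyRuntime {
  std::vector<Obj*> root_table;
  Obj* obj;

  ToyRuntime() : obj(new Obj{42}) {}

  size_t make_handle(Obj* o) { root_table.push_back(o); return root_table.size() - 1; }
  Obj*   resolve(size_t h) const { return root_table[h]; }

  void safepoint() {                  // e.g. link_class(CHECK) or an allocation
    Obj* moved = new Obj(*obj);       // "relocate" the object
    for (Obj*& r : root_table) if (r == obj) r = moved;
    delete obj;
    obj = moved;
  }
};

int main() {
  ToyRuntime rt;
  size_t handle = rt.make_handle(rt.obj);    // like methodHandle / KlassHandle
  Obj*   raw    = rt.obj;                    // like a bare methodOop / klassOop
  rt.safepoint();
  assert(rt.resolve(handle)->payload == 42); // handle still valid after the move
  (void)raw;                                 // raw now points at freed memory; must not be used
  return 0;
}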