# HG changeset patch
# User kvn
# Date 1276650447 25200
# Node ID 4311f23817fd0fffbd8a957b76270e7940ab952b
# Parent  2389669474a6d406c4baad7814649b9057fb2b2a
6959430: Make sure raw loads have control edge
Summary: check that raw loads have control edge
Reviewed-by: never, twisti

diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/compile.cpp
--- a/src/share/vm/opto/compile.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/compile.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -2000,6 +2000,17 @@
     }
   }
 
+#ifdef ASSERT
+  if( n->is_Mem() ) {
+    Compile* C = Compile::current();
+    int alias_idx = C->get_alias_index(n->as_Mem()->adr_type());
+    assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
+            // oop will be recorded in oop map if load crosses safepoint
+            n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
+                             LoadNode::is_immutable_value(n->in(MemNode::Address))),
+            "raw memory operations should have control edge");
+  }
+#endif
   // Count FPU ops and common calls, implements item (3)
   switch( nop ) {
   // Count all float operations that may use FPU
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/graphKit.cpp
--- a/src/share/vm/opto/graphKit.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -1789,9 +1789,10 @@
 
 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
-  Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
+  Node* ctrl = control();
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
   Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( NULL, counter_addr, incr, T_INT, adr_type );
+  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
 }
 
 
@@ -2771,11 +2772,7 @@
   // Update the counter for this lock.  Don't bother using an atomic
   // operation since we don't require absolute accuracy.
   lock->create_lock_counter(map()->jvms());
-  int adr_type = Compile::AliasIdxRaw;
-  Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr()));
-  Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
-  Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory(control(), counter_addr, incr, T_INT, adr_type);
+  increment_counter(lock->counter()->addr());
 }
 #endif
 
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/library_call.cpp
--- a/src/share/vm/opto/library_call.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/library_call.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -3512,8 +3512,7 @@
 
   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS);
-  header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) );
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
 
   // Test the header to see if it is unlocked.
   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
@@ -5202,7 +5201,7 @@
   // super_check_offset, for the desired klass.
   int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
+  Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
   Node* check_offset = _gvn.transform(n3);
   Node* check_value  = dest_elem_klass;
 
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/macro.cpp
--- a/src/share/vm/opto/macro.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/macro.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -1431,7 +1431,7 @@
   Node* mark_node = NULL;
   // For now only enable fast locking for non-array types
   if (UseBiasedLocking && (length == NULL)) {
-    mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
+    mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
   } else {
     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
   }
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/memnode.cpp
--- a/src/share/vm/opto/memnode.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/memnode.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -815,6 +815,16 @@
 }
 #endif
 
+#ifdef ASSERT
+//----------------------------is_immutable_value------------------------------
+// Helper function to allow a raw load without control edge for some cases
+bool LoadNode::is_immutable_value(Node* adr) {
+  return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
+          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+          (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
+           in_bytes(JavaThread::osthread_offset())));
+}
+#endif
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
@@ -828,6 +838,11 @@
   assert(!(adr_type->isa_aryptr() &&
            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
          "use LoadRangeNode instead");
+  // Check control edge of raw loads
+  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          // oop will be recorded in oop map if load crosses safepoint
+          rt->isa_oopptr() || is_immutable_value(adr),
+          "raw memory operations should have control edge");
   switch (bt) {
   case T_BOOLEAN: return new (C, 3) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()    );
   case T_BYTE:    return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
@@ -2064,6 +2079,8 @@
 // Polymorphic factory method:
 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
   Compile* C = gvn.C;
+  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          ctl != NULL, "raw memory operations should have control edge");
 
   switch (bt) {
   case T_BOOLEAN:
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/memnode.hpp
--- a/src/share/vm/opto/memnode.hpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/memnode.hpp	Tue Jun 15 18:07:27 2010 -0700
@@ -189,6 +189,10 @@
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
+#ifdef ASSERT
+  // Helper function to allow a raw load without control edge for some cases
+  static bool is_immutable_value(Node* adr);
+#endif
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
diff -r 2389669474a6 -r 4311f23817fd src/share/vm/opto/parse1.cpp
--- a/src/share/vm/opto/parse1.cpp	Tue Jun 15 15:57:36 2010 -0700
+++ b/src/share/vm/opto/parse1.cpp	Tue Jun 15 18:07:27 2010 -0700
@@ -88,15 +88,16 @@
                                      Node *local_addrs_base) {
   Node *mem = memory(Compile::AliasIdxRaw);
   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
+  Node *ctl = control();
 
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
   Node *l;
   switch( bt ) {                // Signature is flattened
-  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
-  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  case T_INT:     l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_FLOAT:   l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
+  case T_OBJECT:  l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
@@ -104,12 +105,12 @@
     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
     if( Matcher::misaligned_doubles_ok ) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     }
     break;
   }
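
For reference, the rule the new asserts encode: a load from raw memory may float
free of control only if its result is an oop (the oop map keeps it alive across
a safepoint) or the loaded value is known to be immutable; every other raw load
or store must carry a control edge so it cannot be scheduled across a safepoint
or guarding test. The standalone C++ model below is a sketch of that predicate
only, not VM code: MockLoad, ALIAS_IDX_RAW, and raw_load_is_legal are
hypothetical stand-ins for HotSpot's Node, Compile::AliasIdxRaw, and the asserts
added above in Compile::final_graph_reshaping_impl and LoadNode::make.

// Standalone model of the control-edge invariant asserted by this change.
// All names here are hypothetical stand-ins; see the patch for the real code.
#include <cassert>

const int ALIAS_IDX_RAW = 3;  // stand-in for Compile::AliasIdxRaw

struct MockLoad {
  bool has_control;      // in(0) != NULL: load is pinned below its guard
  int  alias_idx;        // alias class of the address being loaded
  bool loads_oop;        // result is an oop; oop maps record it at safepoints
  bool immutable_value;  // e.g. JavaThread::osthread loaded via ThreadLocal
};

// A raw-memory load without a control edge is legal only when its value is an
// oop or known immutable; everything else must be pinned (bug 6959430).
bool raw_load_is_legal(const MockLoad& n) {
  return n.has_control
      || n.alias_idx != ALIAS_IDX_RAW
      || n.loads_oop
      || n.immutable_value;
}

int main() {
  MockLoad pinned    = { true,  ALIAS_IDX_RAW, false, false };
  MockLoad floating  = { false, ALIAS_IDX_RAW, false, false };
  MockLoad immutable = { false, ALIAS_IDX_RAW, false, true  };

  assert(raw_load_is_legal(pinned));     // control edge present: OK
  assert(raw_load_is_legal(immutable));  // exempt: value never changes
  assert(!raw_load_is_legal(floating));  // the case the new asserts catch
  return 0;
}

This is also why increment_counter() and the inline_native_AtomicLong paths
above now pass control() instead of NULL: the counter and header loads touch
raw memory, so under the new asserts they must be pinned explicitly.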