# HG changeset patch
# User kvn
# Date 1326247538 28800
# Node ID 35acf8f0a2e4d294e9fcbb06c8ad0e221b747f6f
# Parent e9a5e0a812c8fe0dd191addca72df99d6e994b1c
7128352: assert(obj_node == obj) failed
Summary: Compare uncasted object nodes.
Reviewed-by: never

diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/callnode.cpp
--- a/src/share/vm/opto/callnode.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/callnode.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -1386,7 +1386,7 @@
     Node *n = ctrl_proj->in(0);
     if (n != NULL && n->is_Unlock()) {
       UnlockNode *unlock = n->as_Unlock();
-      if ((lock->obj_node() == unlock->obj_node()) &&
+      if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
           BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
           !unlock->is_eliminated()) {
         lock_ops.append(unlock);
@@ -1431,7 +1431,7 @@
   }
   if (ctrl->is_Lock()) {
     LockNode *lock = ctrl->as_Lock();
-    if ((lock->obj_node() == unlock->obj_node()) &&
+    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
         BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
       lock_result = lock;
     }
@@ -1462,7 +1462,7 @@
       }
       if (lock1_node != NULL && lock1_node->is_Lock()) {
         LockNode *lock1 = lock1_node->as_Lock();
-        if ((lock->obj_node() == lock1->obj_node()) &&
+        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
             BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
             !lock1->is_eliminated()) {
           lock_ops.append(lock1);
@@ -1650,7 +1650,7 @@
     for (int idx = 0; idx < num_mon; idx++) {
       Node* obj_node = sfn->monitor_obj(jvms, idx);
       BoxLockNode* box_node = BoxLockNode::box_node(sfn->monitor_box(jvms, idx));
-      if ((obj_node == obj) && (box_node->stack_slot() < stk_slot)) {
+      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
         return true;
       }
     }
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/cfgnode.cpp
--- a/src/share/vm/opto/cfgnode.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/cfgnode.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -1597,7 +1597,7 @@
   bool is_loop = (r->is_Loop() && r->req() == 3);
   // Then, check if there is a data loop when phi references itself directly
   // or through other data nodes.
-  if (is_loop && !phase->eqv_uncast(uin, in(LoopNode::EntryControl)) ||
+  if (is_loop && !uin->eqv_uncast(in(LoopNode::EntryControl)) ||
      !is_loop && is_unsafe_data_reference(uin)) {
     // Break this data loop to avoid creation of a dead loop.
     if (can_reshape) {
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/library_call.cpp
--- a/src/share/vm/opto/library_call.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/library_call.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -819,7 +819,7 @@
   if (stopped())
     return NULL;                // already stopped
   bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
-  if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
+  if (zero_offset && subseq_length->eqv_uncast(array_length))
     return NULL;                // common case of whole-array copy
   Node* last = subseq_length;
   if (!zero_offset)             // last += offset
@@ -4667,7 +4667,7 @@
     if (ReduceBulkZeroing
         && !ZeroTLAB              // pointless if already zeroed
         && basic_elem_type != T_CONFLICT // avoid corner case
-        && !_gvn.eqv_uncast(src, dest)
+        && !src->eqv_uncast(dest)
        && ((alloc = tightly_coupled_allocation(dest, slow_region))
            != NULL)
        && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
@@ -4745,7 +4745,7 @@
     // copy_length is 0.
    if (!stopped() && dest_uninitialized) {
      Node* dest_length = alloc->in(AllocateNode::ALength);
-      if (_gvn.eqv_uncast(copy_length, dest_length)
+      if (copy_length->eqv_uncast(dest_length)
          || _gvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
@@ -4791,7 +4791,7 @@
      // with its attendant messy index arithmetic, and upgrade
      // the copy to a more hardware-friendly word size of 64 bits.
      Node* tail_ctl = NULL;
-      if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) {
+      if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
        Node* cmp_lt = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) );
        Node* bol_lt = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) );
        tail_ctl = generate_slow_guard(bol_lt, NULL);
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/locknode.cpp
--- a/src/share/vm/opto/locknode.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/locknode.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -100,7 +100,7 @@
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
-        if (alock->obj_node() == obj) {
+        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
@@ -121,7 +121,7 @@
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
-      assert((flock->box_node() == this) && (flock->obj_node() == obj),"");
+      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    if (n->is_SafePoint() && n->as_SafePoint()->jvms()) {
      SafePointNode* sfn = n->as_SafePoint();
@@ -135,7 +135,7 @@
        Node* obj_node = sfn->monitor_obj(jvms, idx);
        Node* box_node = sfn->monitor_box(jvms, idx);
        if (box_node == this) {
-          assert(obj_node == obj,"");
+          assert(obj_node->eqv_uncast(obj),"");
        }
      }
    }
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/macro.cpp
--- a/src/share/vm/opto/macro.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/macro.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -1818,7 +1818,7 @@
        AbstractLockNode* alock = u->as_AbstractLock();
        // Check lock's box since box could be referenced by Lock's debug info.
        if (alock->box_node() == oldbox) {
-          assert(alock->obj_node() == obj, "");
+          assert(alock->obj_node()->eqv_uncast(obj), "");
          // Mark eliminated all related locks and unlocks.
          alock->set_non_esc_obj();
        }
@@ -1845,7 +1845,7 @@
    Node* u = oldbox->raw_out(i);
    if (u->is_AbstractLock()) {
      AbstractLockNode* alock = u->as_AbstractLock();
-      if (alock->obj_node() == obj && alock->box_node() == oldbox) {
+      if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
        // Replace Box and mark eliminated all related locks and unlocks.
        alock->set_non_esc_obj();
        _igvn.hash_delete(alock);
@@ -1854,7 +1854,7 @@
        next_edge = false;
      }
    }
-    if (u->is_FastLock() && u->as_FastLock()->obj_node() == obj) {
+    if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
      FastLockNode* flock = u->as_FastLock();
      assert(flock->box_node() == oldbox, "sanity");
      _igvn.hash_delete(flock);
@@ -1875,7 +1875,7 @@
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
-          if (box_node == oldbox && obj_node == obj) {
+          if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
            int j = jvms->monitor_box_offset(idx);
            _igvn.hash_delete(u);
            u->set_req(j, newbox);
@@ -1912,7 +1912,7 @@
        alock = u->as_AbstractLock();
        if (alock->box_node() == box_node) {
          // Verify that this Box is referenced only by related locks.
-          assert(alock->obj_node() == obj, "");
+          assert(alock->obj_node()->eqv_uncast(obj), "");
          // Mark all related locks and unlocks.
          alock->set_nested();
        }
@@ -1931,7 +1931,8 @@
      Node* obj = alock->obj_node();
      for (uint j = 0; j < obj->outcnt(); j++) {
        Node* o = obj->raw_out(j);
-        if (o->is_AbstractLock() && o->as_AbstractLock()->obj_node() == obj) {
+        if (o->is_AbstractLock() &&
+            o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
          alock = o->as_AbstractLock();
          Node* box = alock->box_node();
          // Replace old box node with new eliminated box for all users
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/memnode.cpp
--- a/src/share/vm/opto/memnode.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/memnode.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -2201,7 +2201,7 @@
  // unsafe if I have intervening uses... Also disallowed for StoreCM
  // since they must follow each StoreP operation. Redundant StoreCMs
  // are eliminated just before matching in final_graph_reshape.
-  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
+  if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
      mem->Opcode() != Op_StoreCM) {
    // Looking at a dead closed cycle of memory?
    assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
@@ -2274,16 +2274,16 @@
 
  // Load then Store? Then the Store is useless
  if (val->is_Load() &&
-      phase->eqv_uncast( val->in(MemNode::Address), adr ) &&
-      phase->eqv_uncast( val->in(MemNode::Memory ), mem ) &&
+      val->in(MemNode::Address)->eqv_uncast(adr) &&
+      val->in(MemNode::Memory )->eqv_uncast(mem) &&
      val->as_Load()->store_Opcode() == Opcode()) {
    return mem;
  }
 
  // Two stores in a row of the same value?
  if (mem->is_Store() &&
-      phase->eqv_uncast( mem->in(MemNode::Address), adr ) &&
-      phase->eqv_uncast( mem->in(MemNode::ValueIn), val ) &&
+      mem->in(MemNode::Address)->eqv_uncast(adr) &&
+      mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
      mem->Opcode() == Opcode()) {
    return mem;
  }
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/node.cpp
--- a/src/share/vm/opto/node.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/node.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -833,8 +833,20 @@
 
 //---------------------------uncast_helper-------------------------------------
 Node* Node::uncast_helper(const Node* p) {
-  uint max_depth = 3;
-  for (uint i = 0; i < max_depth; i++) {
+#ifdef ASSERT
+  uint depth_count = 0;
+  const Node* orig_p = p;
+#endif
+
+  while (true) {
+#ifdef ASSERT
+    if (depth_count >= K) {
+      orig_p->dump(4);
+      if (p != orig_p)
+        p->dump(1);
+    }
+    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
+#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/node.hpp
--- a/src/share/vm/opto/node.hpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/node.hpp Tue Jan 10 18:05:38 2012 -0800
@@ -429,6 +429,10 @@
  // Strip away casting. (It is depth-limited.)
  Node* uncast() const;
+  // Return whether two Nodes are equivalent, after stripping casting.
+  bool eqv_uncast(const Node* n) const {
+    return (this->uncast() == n->uncast());
+  }
 
 private:
  static Node* uncast_helper(const Node* n);
 
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/phaseX.hpp
--- a/src/share/vm/opto/phaseX.hpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/phaseX.hpp Tue Jan 10 18:05:38 2012 -0800
@@ -256,11 +256,6 @@
  // For pessimistic optimizations this is simply pointer equivalence.
  bool eqv(const Node* n1, const Node* n2) const { return n1 == n2; }
 
-  // Return whether two Nodes are equivalent, after stripping casting.
-  bool eqv_uncast(const Node* n1, const Node* n2) const {
-    return eqv(n1->uncast(), n2->uncast());
-  }
-
  // For pessimistic passes, the return type must monotonically narrow.
  // For optimistic passes, the return type must monotonically widen.
  // It is possible to get into a "death march" in either type of pass,
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 src/share/vm/opto/subnode.cpp
--- a/src/share/vm/opto/subnode.cpp Sat Jan 07 13:26:43 2012 -0800
+++ b/src/share/vm/opto/subnode.cpp Tue Jan 10 18:05:38 2012 -0800
@@ -91,7 +91,7 @@
  // Not correct for SubFnode and AddFNode (must check for infinity)
 
  // Equal? Subtract is zero
-  if (phase->eqv_uncast(in1, in2)) return add_id();
+  if (in1->eqv_uncast(in2)) return add_id();
 
  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
diff -r e9a5e0a812c8 -r 35acf8f0a2e4 test/compiler/7116216/StackOverflow.java
--- a/test/compiler/7116216/StackOverflow.java Sat Jan 07 13:26:43 2012 -0800
+++ b/test/compiler/7116216/StackOverflow.java Tue Jan 10 18:05:38 2012 -0800
@@ -30,7 +30,7 @@
  * @run main/othervm -Xcomp -Xbatch StackOverflow
  */
 
-class StackOverflow {
+public class StackOverflow {
    static String stackOverflow_largeFrame_liveOopForGC;
 
    public static int stackOverflow_largeFrame(int call_count, String liveOopForGC) {
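Background, as a standalone sketch (illustrative only, not part of the changeset): optimizations can interpose a cast node such as a CheckCastPP (a ConstraintCast) between a Lock/Unlock and the allocated object it guards, so a raw pointer comparison of obj_node values can fail even when both nodes denote the same object; that is the assert(obj_node == obj) failure this change fixes. The new Node::eqv_uncast() instead compares nodes after uncast() strips such casts. The toy Node below is a simplified stand-in for the real HotSpot class, not its API: the cast_input field is an assumption of this model, and the 1024 bound stands in for HotSpot's K in the new depth assert.

#include <cassert>
#include <cstdio>

struct Node {
  Node* cast_input;  // non-null if this node is a cast view of another node

  explicit Node(Node* input = nullptr) : cast_input(input) {}

  // Strip away casting, following cast-of-cast chains. Mirrors the patched
  // Node::uncast_helper(): instead of the old hard cap of 3 levels, it walks
  // the whole chain and asserts if it exceeds 1024 (HotSpot's K) steps.
  const Node* uncast() const {
    const Node* p = this;
    unsigned depth_count = 0;
    while (p->cast_input != nullptr) {
      assert(depth_count++ < 1024 && "infinite loop in uncast");
      p = p->cast_input;
    }
    return p;
  }

  // Equivalent after stripping casting (mirrors the new Node::eqv_uncast).
  bool eqv_uncast(const Node* n) const {
    return this->uncast() == n->uncast();
  }
};

int main() {
  Node obj;         // the allocated object
  Node cast(&obj);  // a CheckCastPP-like view of the same object
  assert(&obj != &cast);          // raw pointer comparison distinguishes them
  assert(obj.eqv_uncast(&cast));  // uncasted comparison identifies them
  std::puts("eqv_uncast model OK");
  return 0;
}

Built with, say, g++ -std=c++11, the two asserts in main() show the failure mode and the fix side by side: the raw pointer test treats the cast view as a different node, while the uncasted test matches, which is why every obj_node comparison in the patch switches to eqv_uncast().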