changeset 163:885ed790ecf0

6695810: null oop passed to encode_heap_oop_not_null
Summary: fix several problems in C2 related to Escape Analysis and Compressed Oops.
Reviewed-by: never, jrose
author kvn
date Wed, 21 May 2008 10:45:07 -0700
parents 8aa010f60e0f
children c436414a719e
files src/cpu/sparc/vm/sparc.ad src/cpu/x86/vm/x86_64.ad src/share/vm/opto/callnode.cpp src/share/vm/opto/cfgnode.cpp src/share/vm/opto/compile.cpp src/share/vm/opto/connode.cpp src/share/vm/opto/connode.hpp src/share/vm/opto/escape.cpp src/share/vm/opto/library_call.cpp src/share/vm/opto/macro.cpp src/share/vm/opto/matcher.cpp src/share/vm/opto/memnode.cpp src/share/vm/opto/memnode.hpp src/share/vm/opto/node.cpp src/share/vm/opto/output.cpp src/share/vm/opto/type.cpp src/share/vm/opto/type.hpp test/compiler/6689060/Test.java test/compiler/6695810/Test.java
diffstat 19 files changed, 1086 insertions(+), 249 deletions(-)
--- a/src/cpu/sparc/vm/sparc.ad	Tue May 20 06:32:58 2008 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Wed May 21 10:45:07 2008 -0700
@@ -5471,7 +5471,7 @@
 // Load Klass Pointer
 instruct loadKlass(iRegP dst, memory mem) %{
   match(Set dst (LoadKlass mem));
-  predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
+  predicate(!n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop());
   ins_cost(MEMORY_REF_COST);
   size(4);
 
@@ -5489,7 +5489,7 @@
 // Load Klass Pointer
 instruct loadKlassComp(iRegP dst, memory mem) %{
   match(Set dst (LoadKlass mem));
-  predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+  predicate(n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop());
   ins_cost(MEMORY_REF_COST);
 
   format %{ "LDUW   $mem,$dst\t! compressed klass ptr" %}
--- a/src/cpu/x86/vm/x86_64.ad	Tue May 20 06:32:58 2008 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Wed May 21 10:45:07 2008 -0700
@@ -6044,10 +6044,9 @@
 %}
 
 // Load Compressed Pointer
-instruct loadN(rRegN dst, memory mem, rFlagsReg cr)
+instruct loadN(rRegN dst, memory mem)
 %{
    match(Set dst (LoadN mem));
-   effect(KILL cr);
 
    ins_cost(125); // XXX
    format %{ "movl    $dst, $mem\t# compressed ptr" %}
@@ -6064,7 +6063,7 @@
 instruct loadKlass(rRegP dst, memory mem)
 %{
   match(Set dst (LoadKlass mem));
-  predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
+  predicate(!n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop());
 
   ins_cost(125); // XXX
   format %{ "movq    $dst, $mem\t# class" %}
@@ -6074,10 +6073,11 @@
 %}
 
 // Load Klass Pointer
-instruct loadKlassComp(rRegP dst, memory mem)
+instruct loadKlassComp(rRegP dst, memory mem, rFlagsReg cr)
 %{
   match(Set dst (LoadKlass mem));
-  predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+  predicate(n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop());
+  effect(KILL cr);
 
   ins_cost(125); // XXX
   format %{ "movl    $dst, $mem\t# compressed class\n\t"
@@ -6358,8 +6358,9 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct loadConN(rRegN dst, immN src) %{
+instruct loadConN(rRegN dst, immN src, rFlagsReg cr) %{
   match(Set dst src);
+  effect(KILL cr);
 
   ins_cost(125);
   format %{ "movq    $dst, $src\t# compressed ptr\n\t"
@@ -6633,10 +6634,9 @@
 %}
 
 // Store Compressed Pointer
-instruct storeN(memory mem, rRegN src, rFlagsReg cr)
+instruct storeN(memory mem, rRegN src)
 %{
   match(Set mem (StoreN mem src));
-  effect(KILL cr);
 
   ins_cost(125); // XXX
   format %{ "movl    $mem, $src\t# ptr" %}
--- a/src/share/vm/opto/callnode.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/callnode.cpp	Wed May 21 10:45:07 2008 -0700
@@ -637,7 +637,7 @@
   }
   Compile *C = phase->C;
   int offset = adrInst_t->offset();
-  assert(offset >= 0, "should be valid offset");
+  assert(adrInst_t->klass_is_exact() && offset >= 0, "should be valid offset");
   ciKlass* adr_k = adrInst_t->klass();
   assert(adr_k->is_loaded() &&
          adr_k->is_java_klass() &&
@@ -674,12 +674,11 @@
       ciKlass* at_k = at_ptr->klass();
       if ((adrInst_t->base() == at_ptr->base()) &&
           at_k->is_loaded() &&
-          at_k->is_java_klass() &&
-          !at_k->is_interface()) {
+          at_k->is_java_klass()) {
         // If we have found an argument matching addr_t, check if the field
         // at the specified offset is modified.
-        int at_idx = C->get_alias_index(at_ptr->add_offset(offset)->isa_oopptr());
-        if (base_idx == at_idx &&
+        if ((at_k->is_interface() || adr_k == at_k ||
+             adr_k->is_subclass_of(at_k) && !at_ptr->klass_is_exact()) &&
             (bcea == NULL ||
              bcea->is_arg_modified(i - TypeFunc::Parms, offset, size))) {
           return true;
--- a/src/share/vm/opto/cfgnode.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Wed May 21 10:45:07 2008 -0700
@@ -707,8 +707,14 @@
 //------------------------split_out_instance-----------------------------------
 // Split out an instance type from a bottom phi.
 PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
-  assert(type() == Type::MEMORY && (adr_type() == TypePtr::BOTTOM ||
-         adr_type() == TypeRawPtr::BOTTOM) , "bottom or raw memory required");
+  const TypeOopPtr *t_oop = at->isa_oopptr();
+  assert(t_oop != NULL && t_oop->is_instance(), "expecting instance oopptr");
+  const TypePtr *t = adr_type();
+  assert(type() == Type::MEMORY &&
+         (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
+          t->isa_oopptr() && !t->is_oopptr()->is_instance() &&
+          t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop),
+         "bottom or raw memory required");
 
   // Check if an appropriate node already exists.
   Node *region = in(0);
@@ -1342,7 +1348,7 @@
     Node *n = phi->in(i);
     if( !n ) return NULL;
     if( phase->type(n) == Type::TOP ) return NULL;
-    if( n->Opcode() == Op_ConP )
+    if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN )
       break;
   }
   if( i >= phi->req() )         // Only split for constants
--- a/src/share/vm/opto/compile.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/compile.cpp	Wed May 21 10:45:07 2008 -0700
@@ -368,7 +368,12 @@
   BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size);
   // Record the buffer blob for next time.
   set_scratch_buffer_blob(blob);
-  guarantee(scratch_buffer_blob() != NULL, "Need BufferBlob for code generation");
+  // Have we run out of code space?
+  if (scratch_buffer_blob() == NULL) {
+    // Let CompilerBroker disable further compilations.
+    record_failure("Not enough space for scratch buffer in CodeCache");
+    return;
+  }
 
   // Initialize the relocation buffers
   relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size;
@@ -1065,6 +1070,8 @@
       // No constant oop pointers (such as Strings); they alias with
       // unknown strings.
       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
+    } else if( to->is_instance_field() ) {
+      tj = to; // Keep NotNull and klass_is_exact for instance type
     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
       // During the 2nd round of IterGVN, NotNull castings are removed.
       // Make sure the Bottom and NotNull variants alias the same.
@@ -1084,7 +1091,7 @@
     } else {
       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
       if (!k->equals(canonical_holder) || tj->offset() != offset) {
-        tj = to = TypeInstPtr::make(TypePtr::BotPTR, canonical_holder, false, NULL, offset, to->instance_id());
+        tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset, to->instance_id());
       }
     }
   }
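
Running out of CodeCache space is now a soft bailout instead of a fatal guarantee: record_failure() lets the CompilerBroker disable further compilations, and the caller checks failing() (see the output.cpp hunk below). A sketch of the pattern; everything beyond the record_failure/failing names visible in the diff is a simplifying assumption:

  struct CompileSketch {                       // illustrative only, not Compile
    const char* _failure_reason = nullptr;
    bool failing() const { return _failure_reason != nullptr; }
    void record_failure(const char* why) { _failure_reason = why; }

    void init_scratch_buffer_blob(void* blob) {
      if (blob == nullptr) {                   // out of CodeCache space
        record_failure("Not enough space for scratch buffer in CodeCache");
        return;                                // caller must test failing()
      }
      // ... proceed with the blob ...
    }
  };
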
--- a/src/share/vm/opto/connode.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/connode.cpp	Wed May 21 10:45:07 2008 -0700
@@ -578,8 +578,11 @@
   const Type* newtype = value->bottom_type();
   if (newtype == TypeNarrowOop::NULL_PTR) {
     return phase->transform(new (phase->C, 1) ConPNode(TypePtr::NULL_PTR));
+  } else if (newtype->isa_narrowoop()) {
+    return phase->transform(new (phase->C, 2) DecodeNNode(value, newtype->is_narrowoop()->make_oopptr()));
   } else {
-    return phase->transform(new (phase->C, 2) DecodeNNode(value, newtype->is_narrowoop()->make_oopptr()));
+    ShouldNotReachHere();
+    return NULL; // to make C++ compiler happy.
   }
 }
 
@@ -617,6 +620,9 @@
   }
 }
 
+Node *EncodePNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
+}
 
 //=============================================================================
 //------------------------------Identity---------------------------------------
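
The new EncodePNode::Ideal_DU_postCCP is the heart of 6695810: once CCP removes CastPP null casts, an EncodeP could float above the null check guarding it and feed a null oop into encode_heap_oop_not_null. Reusing MemNode::Ideal_common_DU_postCCP re-pins the node's control edge below the check. A standalone illustration (not VM code) of the invariant being restored:

  #include <cstdint>
  #include <cassert>

  static uintptr_t heap_base;                  // assumed, for illustration

  uint32_t encode_not_null(uintptr_t p) {
    assert(p != 0 && "null oop passed to encode_heap_oop_not_null");
    return (uint32_t)((p - heap_base) >> 3);
  }

  uint32_t maybe_encode(uintptr_t p) {
    if (p == 0) return 0;                      // the guarding null check...
    return encode_not_null(p);                 // ...must dominate the encode
  }
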
--- a/src/share/vm/opto/connode.hpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/connode.hpp	Wed May 21 10:45:07 2008 -0700
@@ -283,6 +283,7 @@
   virtual uint  ideal_reg() const { return Op_RegN; }
 
   static Node* encode(PhaseGVN* phase, Node* value);
+  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
 };
 
 //------------------------------DecodeN--------------------------------
--- a/src/share/vm/opto/escape.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/escape.cpp	Wed May 21 10:45:07 2008 -0700
@@ -888,6 +888,23 @@
       record_for_optimizer(n);
       if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
           (t->isa_instptr() || t->isa_aryptr())) {
+
+        // First, put on the worklist all Field edges from Connection Graph
+        // which is more accurate than putting immediate users from Ideal Graph.
+        for (uint e = 0; e < ptn->edge_count(); e++) {
+          Node *use = _nodes->adr_at(ptn->edge_target(e))->_node;
+          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
+                 "only AddP nodes are Field edges in CG");
+          if (use->outcnt() > 0) { // Don't process dead nodes
+            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
+            if (addp2 != NULL) {
+              assert(alloc->is_AllocateArray(),"array allocation was expected");
+              alloc_worklist.append_if_missing(addp2);
+            }
+            alloc_worklist.append_if_missing(use);
+          }
+        }
+
         // An allocation may have an Initialize which has raw stores. Scan
         // the users of the raw allocation result and push AddP users
         // on alloc_worklist.
@@ -919,6 +936,8 @@
       tinst = igvn->type(base)->isa_oopptr();
     } else if (n->is_Phi() ||
                n->is_CheckCastPP() ||
+               n->Opcode() == Op_EncodeP ||
+               n->Opcode() == Op_DecodeN ||
                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
       if (visited.test_set(n->_idx)) {
         assert(n->is_Phi(), "loops only through Phi's");
@@ -935,13 +954,25 @@
         tinst = igvn->type(val)->isa_oopptr();
         assert(tinst != NULL && tinst->is_instance() &&
                tinst->instance_id() == elem , "instance type expected.");
-        const TypeOopPtr *tn_t = igvn->type(tn)->isa_oopptr();
+
+        const TypeOopPtr *tn_t = NULL;
+        const Type *tn_type = igvn->type(tn);
+        if (tn_type->isa_narrowoop()) {
+          tn_t = tn_type->is_narrowoop()->make_oopptr()->isa_oopptr();
+        } else {
+          tn_t = tn_type->isa_oopptr();
+        }
 
         if (tn_t != NULL &&
             tinst->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) {
+          if (tn_type->isa_narrowoop()) {
+            tn_type = tinst->make_narrowoop();
+          } else {
+            tn_type = tinst;
+          }
           igvn->hash_delete(tn);
-          igvn->set_type(tn, tinst);
-          tn->set_type(tinst);
+          igvn->set_type(tn, tn_type);
+          tn->set_type(tn_type);
           igvn->hash_insert(tn);
           record_for_optimizer(n);
         }
@@ -978,6 +1009,8 @@
         alloc_worklist.append_if_missing(use);
       } else if (use->is_Phi() ||
                  use->is_CheckCastPP() ||
+                 use->Opcode() == Op_EncodeP ||
+                 use->Opcode() == Op_DecodeN ||
                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
         alloc_worklist.append_if_missing(use);
       }
@@ -1199,7 +1232,7 @@
 
 void ConnectionGraph::compute_escape() {
 
-  // 1. Populate Connection Graph with Ideal nodes.
+  // 1. Populate Connection Graph (CG) with Ideal nodes.
 
   Unique_Node_List worklist_init;
   worklist_init.map(_compile->unique(), NULL);  // preallocate space
@@ -1281,11 +1314,13 @@
       remove_deferred(ni, &deferred_edges, &visited);
       if (n->is_AddP()) {
         // If this AddP computes an address which may point to more that one
-        // object, nothing the address points to can be scalar replaceable.
+        // object or more than one field (an array element), nothing the address
+        // points to can be scalar replaceable.
         Node *base = get_addp_base(n);
         ptset.Clear();
         PointsTo(ptset, base, igvn);
-        if (ptset.Size() > 1) {
+        if (ptset.Size() > 1 ||
+            (ptset.Size() != 0 && ptn->offset() == Type::OffsetBot)) {
           for( VectorSetI j(&ptset); j.test(); ++j ) {
             uint pt = j.elem;
             ptnode_adr(pt)->_scalar_replaceable = false;
@@ -1979,6 +2014,11 @@
       assert(false, "Op_ConP");
       break;
     }
+    case Op_ConN:
+    {
+      assert(false, "Op_ConN");
+      break;
+    }
     case Op_CreateEx:
     {
       assert(false, "Op_CreateEx");
--- a/src/share/vm/opto/library_call.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/library_call.cpp	Wed May 21 10:45:07 2008 -0700
@@ -2168,7 +2168,7 @@
     // (They don't if CAS fails, but it isn't worth checking.)
     pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT);
 #ifdef _LP64
-    if (adr->bottom_type()->is_narrow()) {
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
                                                            EncodePNode::encode(&_gvn, newval),
                                                            EncodePNode::encode(&_gvn, oldval)));
@@ -2838,6 +2838,8 @@
   _sp += nargs;  // set original stack for use by uncommon_trap
   mirror = do_null_check(mirror, T_OBJECT);
   _sp -= nargs;
+  // If mirror or obj is dead, only null-path is taken.
+  if (stopped())  return true;
 
   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
   RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
@@ -3827,24 +3829,22 @@
   if (!stopped()) {
     // Copy the fastest available way.
     // (No need for PreserveJVMState, since we're using it all up now.)
+    // TODO: generate field/element copies for small objects instead.
     Node* src  = obj;
     Node* dest = raw_obj;
-    Node* end  = dest;
     Node* size = _gvn.transform(alloc_siz);
 
     // Exclude the header.
     int base_off = instanceOopDesc::base_offset_in_bytes();
     if (UseCompressedOops) {
-      // copy the header gap though.
-      Node* sptr = basic_plus_adr(src,  base_off);
-      Node* dptr = basic_plus_adr(dest, base_off);
-      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, raw_adr_type);
-      store_to_memory(control(), dptr, sval, T_INT, raw_adr_type);
-      base_off += sizeof(int);
+      assert(base_off % BytesPerLong != 0, "base with compressed oops");
+      // With compressed oops, base_offset_in_bytes is 12, which creates
+      // a gap since countx is rounded up to 8 bytes below.
+      // Copy klass and the gap.
+      base_off = instanceOopDesc::klass_offset_in_bytes();
     }
     src  = basic_plus_adr(src,  base_off);
     dest = basic_plus_adr(dest, base_off);
-    end  = basic_plus_adr(end,  size);
 
     // Compute the length also, if needed:
     Node* countx = size;
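
The clone fast path above turns on header-layout arithmetic. With compressed oops the instance header is an 8-byte mark word followed by a 4-byte narrow klass, so fields start at offset 12; since the copy length countx is rounded up to 8 bytes, the copy is started at the klass field so the rounded copy carries the klass and the 4-byte alignment gap along (which is also why macro.cpp no longer needs to zero the gap separately). A worked sketch; the concrete offsets are the standard 64-bit values implied by the comment in the hunk, stated here as assumptions:

  int clone_copy_start_offset() {
    int base_off = 12;        // instanceOopDesc::base_offset_in_bytes()
    const bool use_compressed_oops = true;     // assumed for this sketch
    if (use_compressed_oops) {
      // 12 % 8 != 0: start at the 4-byte klass field (offset 8) instead,
      // so the 8-byte-rounded copy covers the klass and the alignment gap.
      base_off = 8;           // instanceOopDesc::klass_offset_in_bytes()
    }
    return base_off;
  }
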
--- a/src/share/vm/opto/macro.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/macro.cpp	Wed May 21 10:45:07 2008 -0700
@@ -1282,12 +1282,6 @@
   }
   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
 
-  if (UseCompressedOops) {
-    Node *zeronode = makecon(TypeInt::ZERO);
-    // store uncompressed 0 into klass ptr to zero out gap.  The gap is
-    // used for primitive fields and has to be zeroed.
-    rawmem = make_store(control, rawmem, object, oopDesc::klass_gap_offset_in_bytes(), zeronode, T_INT);
-  }
   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
   int header_size = alloc->minimum_header_size();  // conservatively small
 
--- a/src/share/vm/opto/matcher.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/matcher.cpp	Wed May 21 10:45:07 2008 -0700
@@ -880,7 +880,7 @@
         Node *m = n->in(i);          // Get input
         int op = m->Opcode();
         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
-        if( op == Op_ConI || op == Op_ConP ||
+        if( op == Op_ConI || op == Op_ConP || op == Op_ConN ||
             op == Op_ConF || op == Op_ConD || op == Op_ConL
             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
             ) {
@@ -1726,6 +1726,14 @@
         }
         break;
       }
+      case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
+        TypeNode *tn = n->as_Type(); // Constants derive from type nodes
+        const TypePtr* tp = tn->type()->is_narrowoop()->make_oopptr();
+        if (tp->_ptr == TypePtr::AnyNull) {
+          tn->set_type(TypeNarrowOop::NULL_PTR);
+        }
+        break;
+      }
       case Op_Binary:         // These are introduced in the Post_Visit state.
         ShouldNotReachHere();
         break;
--- a/src/share/vm/opto/memnode.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/memnode.cpp	Wed May 21 10:45:07 2008 -0700
@@ -133,7 +133,9 @@
     PhiNode *mphi = result->as_Phi();
     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
     const TypePtr *t = mphi->adr_type();
-    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM) {
+    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
+        t->isa_oopptr() && !t->is_oopptr()->is_instance() &&
+        t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop) {
       // clone the Phi with our address type
       result = mphi->split_out_instance(t_adr, igvn);
     } else {
@@ -263,7 +265,10 @@
   // of all its inputs dominate or equal to sub's control edge.
 
   // Currently 'sub' is either Allocate, Initialize or Start nodes.
-  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start(), "expecting only these nodes");
+  // Or a Region node, for the check in LoadNode::Ideal();
+  // 'sub' should have sub->in(0) != NULL.
+  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
+         sub->is_Region(), "expecting only these nodes");
 
   // Get control edge of 'sub'.
   sub = sub->find_exact_control(sub->in(0));
@@ -576,6 +581,9 @@
 // Find any cast-away of null-ness and keep its control.  Null cast-aways are
 // going away in this pass and we need to make this memory op depend on the
 // gating null check.
+Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
+}
 
 // I tried to leave the CastPP's in.  This makes the graph more accurate in
 // some sense; we get to keep around the knowledge that an oop is not-null
@@ -585,15 +593,14 @@
 // some of the more trivial cases in the optimizer.  Removing more useless
 // Phi's started allowing Loads to illegally float above null checks.  I gave
 // up on this approach.  CNC 10/20/2000
-Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-  Node *ctr = in(MemNode::Control);
-  Node *mem = in(MemNode::Memory);
-  Node *adr = in(MemNode::Address);
+// This static method may be called from outside MemNode (EncodePNode calls it).
+// Only the control edge of the node 'n' might be updated.
+Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
   Node *skipped_cast = NULL;
   // Need a null check?  Regular static accesses do not because they are
   // from constant addresses.  Array ops are gated by the range check (which
   // always includes a NULL check).  Just check field ops.
-  if( !ctr ) {
+  if( n->in(MemNode::Control) == NULL ) {
     // Scan upwards for the highest location we can place this memory op.
     while( true ) {
       switch( adr->Opcode() ) {
@@ -618,10 +625,10 @@
         }
         // CastPP is going away in this pass!  We need this memory op to be
         // control-dependent on the test that is guarding the CastPP.
-        ccp->hash_delete(this);
-        set_req(MemNode::Control, adr->in(0));
-        ccp->hash_insert(this);
-        return this;
+        ccp->hash_delete(n);
+        n->set_req(MemNode::Control, adr->in(0));
+        ccp->hash_insert(n);
+        return n;
 
       case Op_Phi:
         // Attempt to float above a Phi to some dominating point.
@@ -652,10 +659,10 @@
           adr = adr->in(1);
           continue;
         }
-        ccp->hash_delete(this);
-        set_req(MemNode::Control, adr->in(0));
-        ccp->hash_insert(this);
-        return this;
+        ccp->hash_delete(n);
+        n->set_req(MemNode::Control, adr->in(0));
+        ccp->hash_insert(n);
+        return n;
 
         // List of "safe" opcodes; those that implicitly block the memory
         // op below any null check.
@@ -665,6 +672,7 @@
       case Op_LoadN:            // Loading from within a klass
       case Op_LoadKlass:        // Loading from within a klass
       case Op_ConP:             // Loading from a klass
+      case Op_ConN:             // Loading from a klass
       case Op_CreateEx:         // Sucking up the guts of an exception oop
       case Op_Con:              // Reading from TLS
       case Op_CMoveP:           // CMoveP is pinned
@@ -676,8 +684,8 @@
         {
           assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
           const Node* call = adr->in(0);
-          if (call->is_CallStaticJava()) {
-            const CallStaticJavaNode* call_java = call->as_CallStaticJava();
+          if (call->is_CallJava()) {
+            const CallJavaNode* call_java = call->as_CallJava();
             const TypeTuple *r = call_java->tf()->range();
             assert(r->cnt() > TypeFunc::Parms, "must return value");
             const Type* ret_type = r->field_at(TypeFunc::Parms);
@@ -749,7 +757,7 @@
   case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr()    );
   case T_OBJECT:
 #ifdef _LP64
-    if (adr->bottom_type()->is_narrow()) {
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       const TypeNarrowOop* narrowtype;
       if (rt->isa_narrowoop()) {
         narrowtype = rt->is_narrowoop();
@@ -761,10 +769,10 @@
       return DecodeNNode::decode(&gvn, load);
     } else
 #endif
-      {
-        assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop");
-        return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
-      }
+    {
+      assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop");
+      return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+    }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
@@ -1118,6 +1126,127 @@
   return NULL;
 }
 
+//------------------------------split_through_phi------------------------------
+// Split instance field load through Phi.
+Node *LoadNode::split_through_phi(PhaseGVN *phase) {
+  Node* mem     = in(MemNode::Memory);
+  Node* address = in(MemNode::Address);
+  const TypePtr *addr_t = phase->type(address)->isa_ptr();
+  const TypeOopPtr *t_oop = addr_t->isa_oopptr();
+
+  assert(mem->is_Phi() && (t_oop != NULL) &&
+         t_oop->is_instance_field(), "invalid conditions");
+
+  Node *region = mem->in(0);
+  if (region == NULL) {
+    return NULL; // Wait for a stable graph
+  }
+  uint cnt = mem->req();
+  for( uint i = 1; i < cnt; i++ ) {
+    Node *in = mem->in(i);
+    if( in == NULL ) {
+      return NULL; // Wait for a stable graph
+    }
+  }
+  // Check for loop invariant.
+  if (cnt == 3) {
+    for( uint i = 1; i < cnt; i++ ) {
+      Node *in = mem->in(i);
+      Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
+      if (m == mem) {
+        set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi.
+        return this;
+      }
+    }
+  }
+  // Split through Phi (see original code in loopopts.cpp).
+  assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
+
+  // Do nothing here if Identity will find a value
+  // (to avoid infinite chain of value phis generation).
+  if ( !phase->eqv(this, this->Identity(phase)) )
+    return NULL;
+
+  // Skip the split if the region dominates some control edge of the address.
+  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
+    return NULL;
+
+  const Type* this_type = this->bottom_type();
+  int this_index  = phase->C->get_alias_index(addr_t);
+  int this_offset = addr_t->offset();
+  int this_iid    = addr_t->is_oopptr()->instance_id();
+  int wins = 0;
+  PhaseIterGVN *igvn = phase->is_IterGVN();
+  Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
+  for( uint i = 1; i < region->req(); i++ ) {
+    Node *x;
+    Node* the_clone = NULL;
+    if( region->in(i) == phase->C->top() ) {
+      x = phase->C->top();      // Dead path?  Use a dead data op
+    } else {
+      x = this->clone();        // Else clone up the data op
+      the_clone = x;            // Remember for possible deletion.
+      // Alter data node to use pre-phi inputs
+      if( this->in(0) == region ) {
+        x->set_req( 0, region->in(i) );
+      } else {
+        x->set_req( 0, NULL );
+      }
+      for( uint j = 1; j < this->req(); j++ ) {
+        Node *in = this->in(j);
+        if( in->is_Phi() && in->in(0) == region )
+          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
+      }
+    }
+    // Check for a 'win' on some paths
+    const Type *t = x->Value(igvn);
+
+    bool singleton = t->singleton();
+
+    // See comments in PhaseIdealLoop::split_thru_phi().
+    if( singleton && t == Type::TOP ) {
+      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
+    }
+
+    if( singleton ) {
+      wins++;
+      x = igvn->makecon(t);
+    } else {
+      // We now call Identity to try to simplify the cloned node.
+      // Note that some Identity methods call phase->type(this).
+      // Make sure that the type array is big enough for
+      // our new node, even though we may throw the node away.
+      // (This tweaking with igvn only works because x is a new node.)
+      igvn->set_type(x, t);
+      Node *y = x->Identity(igvn);
+      if( y != x ) {
+        wins++;
+        x = y;
+      } else {
+        y = igvn->hash_find(x);
+        if( y ) {
+          wins++;
+          x = y;
+        } else {
+          // Else x is a new node we are keeping
+          // We do not need register_new_node_with_optimizer
+          // because set_type has already been called.
+          igvn->_worklist.push(x);
+        }
+      }
+    }
+    if (x != the_clone && the_clone != NULL)
+      igvn->remove_dead_node(the_clone);
+    phi->set_req(i, x);
+  }
+  if( wins > 0 ) {
+    // Record Phi
+    igvn->register_new_node_with_optimizer(phi);
+    return phi;
+  }
+  igvn->remove_dead_node(phi);
+  return NULL;
+}
 
 //------------------------------Ideal------------------------------------------
 // If the load is from Field memory and the pointer is non-null, we can
@@ -1175,112 +1304,9 @@
     const TypeOopPtr *t_oop = addr_t->isa_oopptr();
     if (can_reshape && opt_mem->is_Phi() &&
         (t_oop != NULL) && t_oop->is_instance_field()) {
-      assert(t_oop->offset() != Type::OffsetBot && t_oop->offset() != Type::OffsetTop, "");
-      Node *region = opt_mem->in(0);
-      uint cnt = opt_mem->req();
-      for( uint i = 1; i < cnt; i++ ) {
-        Node *in = opt_mem->in(i);
-        if( in == NULL ) {
-          region = NULL; // Wait stable graph
-          break;
-        }
-      }
-      if (region != NULL) {
-        // Check for loop invariant.
-        if (cnt == 3) {
-          for( uint i = 1; i < cnt; i++ ) {
-            Node *in = opt_mem->in(i);
-            Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
-            if (m == opt_mem) {
-              set_req(MemNode::Memory, opt_mem->in(cnt - i)); // Skip this phi.
-              return this;
-            }
-          }
-        }
-        // Split through Phi (see original code in loopopts.cpp).
-        assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
-
-        // Do nothing here if Identity will find a value
-        // (to avoid infinite chain of value phis generation).
-        if ( !phase->eqv(this, this->Identity(phase)) )
-          return NULL;
-
-        const Type* this_type = this->bottom_type();
-        int this_index  = phase->C->get_alias_index(addr_t);
-        int this_offset = addr_t->offset();
-        int this_iid    = addr_t->is_oopptr()->instance_id();
-        int wins = 0;
-        PhaseIterGVN *igvn = phase->is_IterGVN();
-        Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
-        for( uint i = 1; i < region->req(); i++ ) {
-          Node *x;
-          Node* the_clone = NULL;
-          if( region->in(i) == phase->C->top() ) {
-            x = phase->C->top();      // Dead path?  Use a dead data op
-          } else {
-            x = this->clone();        // Else clone up the data op
-            the_clone = x;            // Remember for possible deletion.
-            // Alter data node to use pre-phi inputs
-            if( this->in(0) == region ) {
-              x->set_req( 0, region->in(i) );
-            } else {
-              x->set_req( 0, NULL );
-            }
-            for( uint j = 1; j < this->req(); j++ ) {
-              Node *in = this->in(j);
-              if( in->is_Phi() && in->in(0) == region )
-                x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
-            }
-          }
-          // Check for a 'win' on some paths
-          const Type *t = x->Value(igvn);
-
-          bool singleton = t->singleton();
-
-          // See comments in PhaseIdealLoop::split_thru_phi().
-          if( singleton && t == Type::TOP ) {
-            singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
-          }
-
-          if( singleton ) {
-            wins++;
-            x = igvn->makecon(t);
-          } else {
-            // We now call Identity to try to simplify the cloned node.
-            // Note that some Identity methods call phase->type(this).
-            // Make sure that the type array is big enough for
-            // our new node, even though we may throw the node away.
-            // (This tweaking with igvn only works because x is a new node.)
-            igvn->set_type(x, t);
-            Node *y = x->Identity(igvn);
-            if( y != x ) {
-              wins++;
-              x = y;
-            } else {
-              y = igvn->hash_find(x);
-              if( y ) {
-                wins++;
-                x = y;
-              } else {
-                // Else x is a new node we are keeping
-                // We do not need register_new_node_with_optimizer
-                // because set_type has already been called.
-                igvn->_worklist.push(x);
-              }
-            }
-          }
-          if (x != the_clone && the_clone != NULL)
-            igvn->remove_dead_node(the_clone);
-          phi->set_req(i, x);
-        }
-        if( wins > 0 ) {
-          // Record Phi
-          igvn->register_new_node_with_optimizer(phi);
-          return phi;
-        } else {
-          igvn->remove_dead_node(phi);
-        }
-      }
+      // Split instance field load through Phi.
+      Node* result = split_through_phi(phase);
+      if (result != NULL) return result;
     }
   }
 
@@ -1835,7 +1861,7 @@
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
-    if (adr->bottom_type()->is_narrow() ||
+    if (adr->bottom_type()->is_ptr_to_narrowoop() ||
         (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
          adr->bottom_type()->isa_rawptr())) {
       const TypePtr* type = val->bottom_type()->is_ptr();
--- a/src/share/vm/opto/memnode.hpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/memnode.hpp	Wed May 21 10:45:07 2008 -0700
@@ -72,7 +72,8 @@
   // This one should probably be a phase-specific function:
   static bool all_controls_dominate(Node* dom, Node* sub);
 
-  // Is this Node a MemNode or some descendent?  Default is YES.
+  // Find any cast-away of null-ness and keep its control.
+  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
 
   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
@@ -150,6 +151,9 @@
   // zero out the control input.
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 
+  // Split instance field load through Phi.
+  Node* split_through_phi(PhaseGVN *phase);
+
   // Recover original value from boxed values
   Node *eliminate_autobox(PhaseGVN *phase);
 
--- a/src/share/vm/opto/node.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/node.cpp	Wed May 21 10:45:07 2008 -0700
@@ -1049,51 +1049,80 @@
   Node* orig_sub = sub;
   nlist.clear();
   bool this_dominates = false;
-  uint region_input = 0;
+  bool result = false; // Conservative answer
+
   while (sub != NULL) {        // walk 'sub' up the chain to 'this'
     if (sub == this) {
       if (nlist.size() == 0) {
         // No Region nodes except loops were visited before and the EntryControl
         // path was taken for loops: it did not walk in a cycle.
-        return true;
-      } else if (!this_dominates) {
+        result = true;
+        break;
+      } else if (this_dominates) {
+        result = false;          // already met before: walk in a cycle
+        break;
+      } else {
         // Region nodes were visited. Continue walk up to Start or Root
         // to make sure that it did not walk in a cycle.
         this_dominates = true; // first time meet
         iterations_without_region_limit = DominatorSearchLimit; // Reset
-      } else {
-        return false;          // already met before: walk in a cycle
-      }
+      }
     }
-    if (sub->is_Start() || sub->is_Root())
-      return this_dominates;
-
+    if (sub->is_Start() || sub->is_Root()) {
+      result = this_dominates;
+      break;
+    }
     Node* up = sub->find_exact_control(sub->in(0));
-    if (up == NULL || up->is_top())
-      return false; // Conservative answer for dead code
+    if (up == NULL || up->is_top()) {
+      result = false; // Conservative answer for dead code
+      break;
+    }
+    if (sub == up && (sub->is_Loop() || sub->is_Region() && sub->req() != 3)) {
+      // Take first valid path on the way up to 'this'.
+      up = sub->in(1); // in(LoopNode::EntryControl);
+    } else if (sub == up && sub->is_Region()) {
+      assert(sub->req() == 3, "sanity");
+      iterations_without_region_limit = DominatorSearchLimit; // Reset
 
-    if (sub == up && sub->is_Loop()) {
-      up = sub->in(1); // in(LoopNode::EntryControl);
-    } else if (sub == up && sub->is_Region() && sub->req() == 3) {
-      iterations_without_region_limit = DominatorSearchLimit; // Reset
+      // Try both paths for such Regions.
+      // It is not accurate without dominator information for Regions.
+      // With such information, the other path should be checked for
+      // the most dominating Region which was visited before.
+      bool region_was_visited_before = false;
       uint i = 1;
       uint size = nlist.size();
       if (size == 0) {
-        // No Region nodes (except Loops) were visited before.
+        // No such Region nodes were visited before.
         // Take first valid path on the way up to 'this'.
-      } else if (nlist.at(size - 1) == sub) {
-        // This Region node was just visited. Take other path.
-        i = region_input + 1;
-        nlist.pop();
       } else {
         // Was this Region node visited before?
-        for (uint j = 0; j < size; j++) {
-          if (nlist.at(j) == sub) {
-            return false; // The Region node was visited before. Give up.
+        intptr_t ni;
+        int j = size - 1;
+        for (; j >= 0; j--) {
+          ni = (intptr_t)nlist.at(j);
+          if ((Node*)(ni & ~1) == sub) {
+            if ((ni & 1) != 0) {
+              break; // Visited 2 paths. Give up.
+            } else {
+              // The Region node was visited before only once.
+              nlist.remove(j);
+              region_was_visited_before = true;
+              for (; i < sub->req(); i++) {
+                Node* in = sub->in(i);
+                if (in != NULL && !in->is_top() && in != sub) {
+                  break;
+                }
+              }
+              i++; // Take other path.
+              break;
+            }
           }
         }
+        if (j >= 0 && (ni & 1) != 0) {
+          result = false; // Visited 2 paths. Give up.
+          break;
+        }
         // The Region node was not visited before.
-        // Take first valid path on the way up to 'this'.
       }
       for (; i < sub->req(); i++) {
         Node* in = sub->in(i);
@@ -1102,20 +1131,26 @@
         }
       }
       if (i < sub->req()) {
-        nlist.push(sub);
         up = sub->in(i);
-        region_input = i;
+        if (region_was_visited_before && sub != up) {
+          // Set bit 0 to indicate that both paths were taken.
+          nlist.push((Node*)((intptr_t)sub + 1));
+        } else {
+          nlist.push(sub);
+        }
       }
     }
-    if (sub == up)
-      return false;    // some kind of tight cycle
-
-    if (--iterations_without_region_limit < 0)
-      return false;    // dead cycle
-
+    if (sub == up) {
+      result = false;    // some kind of tight cycle
+      break;
+    }
+    if (--iterations_without_region_limit < 0) {
+      result = false;    // dead cycle
+      break;
+    }
     sub = up;
   }
-  return false;
+  return result;
 }
 
 //------------------------------remove_dead_region-----------------------------
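
all_controls_dominate() now remembers, for each visited two-input Region, whether one or both inputs have already been tried, by tagging the low bit of the Node pointer pushed on nlist: Node objects are at least 2-byte aligned, so bit 0 of the pointer is spare. A standalone sketch of that tagging idiom (the helper names are illustrative):

  #include <cstdint>

  struct Node;                                 // payload type, opaque here

  inline Node* tag_second_path(Node* n)  { return (Node*)((intptr_t)n + 1); }
  inline Node* untag(Node* n)            { return (Node*)((intptr_t)n & ~1); }
  inline bool  both_paths_taken(Node* n) { return ((intptr_t)n & 1) != 0; }
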
--- a/src/share/vm/opto/output.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/output.cpp	Wed May 21 10:45:07 2008 -0700
@@ -48,6 +48,7 @@
   // Initialize the space for the BufferBlob used to find and verify
   // instruction size in MachNode::emit_size()
   init_scratch_buffer_blob();
+  if (failing())  return; // Out of memory
 
   // Make sure I can find the Start Node
   Block_Array& bbs = _cfg->_bbs;
--- a/src/share/vm/opto/type.cpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/type.cpp	Wed May 21 10:45:07 2008 -0700
@@ -311,8 +311,18 @@
   mreg2type[Op_RegFlags] = TypeInt::CC;
 
   TypeAryPtr::RANGE   = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes());
-  // There is no shared klass for Object[].  See note in TypeAryPtr::klass().
-  TypeAryPtr::OOPS    = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/,  false,  Type::OffsetBot);
+
+  TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/,  false,  Type::OffsetBot);
+
+#ifdef _LP64
+  if (UseCompressedOops) {
+    TypeAryPtr::OOPS  = TypeAryPtr::NARROWOOPS;
+  } else
+#endif
+  {
+    // There is no shared klass for Object[].  See note in TypeAryPtr::klass().
+    TypeAryPtr::OOPS  = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/,  false,  Type::OffsetBot);
+  }
   TypeAryPtr::BYTES   = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE      ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE),   true,  Type::OffsetBot);
   TypeAryPtr::SHORTS  = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT     ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT),  true,  Type::OffsetBot);
   TypeAryPtr::CHARS   = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::CHAR      ,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR),   true,  Type::OffsetBot);
@@ -321,9 +331,10 @@
   TypeAryPtr::FLOATS  = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT        ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT),  true,  Type::OffsetBot);
   TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE       ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true,  Type::OffsetBot);
 
-  TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; // what should this be?
+  // Nobody should ask for _array_body_type[T_NARROWOOP]; leave it NULL so any use asserts.
+  TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL;
   TypeAryPtr::_array_body_type[T_OBJECT]  = TypeAryPtr::OOPS;
-  TypeAryPtr::_array_body_type[T_ARRAY]   = TypeAryPtr::OOPS;   // arrays are stored in oop arrays
+  TypeAryPtr::_array_body_type[T_ARRAY]   = TypeAryPtr::OOPS; // arrays are stored in oop arrays
   TypeAryPtr::_array_body_type[T_BYTE]    = TypeAryPtr::BYTES;
   TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES;  // boolean[] is a byte array
   TypeAryPtr::_array_body_type[T_SHORT]   = TypeAryPtr::SHORTS;
@@ -696,7 +707,7 @@
   ResourceMark rm;
   Dict d(cmpkey,hashkey);       // Stop recursive type dumping
   dump2(d,1, st);
-  if (isa_ptr() && is_ptr()->is_narrow()) {
+  if (is_ptr_to_narrowoop()) {
     st->print(" [narrow]");
   }
 }
@@ -2146,6 +2157,67 @@
 // Convenience common pre-built type.
 const TypeOopPtr *TypeOopPtr::BOTTOM;
 
+//------------------------------TypeOopPtr-------------------------------------
+TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id )
+  : TypePtr(t, ptr, offset),
+    _const_oop(o), _klass(k),
+    _klass_is_exact(xk),
+    _is_ptr_to_narrowoop(false),
+    _instance_id(instance_id) {
+#ifdef _LP64
+  if (UseCompressedOops && _offset != 0) {
+    if (klass() == NULL) {
+      assert(this->isa_aryptr(), "only arrays without klass");
+      _is_ptr_to_narrowoop = true;
+    } else if (_offset == oopDesc::klass_offset_in_bytes()) {
+      _is_ptr_to_narrowoop = true;
+    } else if (this->isa_aryptr()) {
+      _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() &&
+                             _offset != arrayOopDesc::length_offset_in_bytes());
+    } else if (klass() == ciEnv::current()->Class_klass() &&
+               (_offset == java_lang_Class::klass_offset_in_bytes() ||
+                _offset == java_lang_Class::array_klass_offset_in_bytes())) {
+      // Special hidden fields from the Class.
+      assert(this->isa_instptr(), "must be an instance ptr.");
+      _is_ptr_to_narrowoop = true;
+    } else if (klass()->is_instance_klass()) {
+      ciInstanceKlass* ik = klass()->as_instance_klass();
+      ciField* field = NULL;
+      if (this->isa_klassptr()) {
+        // Perm objects don't use compressed references, except for
+        // static fields which are currently compressed.
+        field = ik->get_field_by_offset(_offset, true);
+        if (field != NULL) {
+          BasicType basic_elem_type = field->layout_type();
+          _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
+                                  basic_elem_type == T_ARRAY);
+        }
+      } else if (_offset == OffsetBot || _offset == OffsetTop) {
+        // unsafe access
+        _is_ptr_to_narrowoop = true;
+      } else { // exclude unsafe ops
+        assert(this->isa_instptr(), "must be an instance ptr.");
+        // Field which contains a compressed oop reference.
+        field = ik->get_field_by_offset(_offset, false);
+        if (field != NULL) {
+          BasicType basic_elem_type = field->layout_type();
+          _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
+                                  basic_elem_type == T_ARRAY);
+        } else if (klass()->equals(ciEnv::current()->Object_klass())) {
+          // Compile::find_alias_type() casts exactness on all types to verify
+          // that it does not affect alias type.
+          _is_ptr_to_narrowoop = true;
+        } else {
+          // Type for the copy start in LibraryCallKit::inline_native_clone().
+          assert(!klass_is_exact(), "only non-exact klass");
+          _is_ptr_to_narrowoop = true;
+        }
+      }
+    }
+  }
+#endif
+}
+
 //------------------------------make-------------------------------------------
 const TypeOopPtr *TypeOopPtr::make(PTR ptr,
                                    int offset) {
@@ -2593,9 +2665,13 @@
 //-----------------------------cast_to_instance-------------------------------
 const TypeOopPtr *TypeInstPtr::cast_to_instance(int instance_id) const {
   if( instance_id == _instance_id) return this;
-  bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true;
-
-  return make(ptr(), klass(), exact, const_oop(), _offset, instance_id);
+  bool exact = true;
+  PTR  ptr_t = NotNull;
+  if (instance_id == UNKNOWN_INSTANCE) {
+    exact = _klass_is_exact;
+    ptr_t = _ptr;
+  }
+  return make(ptr_t, klass(), exact, const_oop(), _offset, instance_id);
 }
 
 //------------------------------xmeet_unloaded---------------------------------
@@ -3014,6 +3090,7 @@
 // Convenience common pre-built types.
 const TypeAryPtr *TypeAryPtr::RANGE;
 const TypeAryPtr *TypeAryPtr::OOPS;
+const TypeAryPtr *TypeAryPtr::NARROWOOPS;
 const TypeAryPtr *TypeAryPtr::BYTES;
 const TypeAryPtr *TypeAryPtr::SHORTS;
 const TypeAryPtr *TypeAryPtr::CHARS;
@@ -3063,8 +3140,13 @@
 //-----------------------------cast_to_instance-------------------------------
 const TypeOopPtr *TypeAryPtr::cast_to_instance(int instance_id) const {
   if( instance_id == _instance_id) return this;
-  bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true;
-  return make(ptr(), const_oop(), _ary, klass(), exact, _offset, instance_id);
+  bool exact = true;
+  PTR  ptr_t = NotNull;
+  if (instance_id == UNKNOWN_INSTANCE) {
+    exact = _klass_is_exact;
+    ptr_t = _ptr;
+  }
+  return make(ptr_t, const_oop(), _ary, klass(), exact, _offset, instance_id);
 }
 
 //-----------------------------narrow_size_type-------------------------------
@@ -3547,7 +3629,7 @@
     k_ary = ciTypeArrayKlass::make(el->basic_type());
   }
 
-  if( this != TypeAryPtr::OOPS )
+  if( this != TypeAryPtr::OOPS ) {
     // The _klass field acts as a cache of the underlying
     // ciKlass for this array type.  In order to set the field,
     // we need to cast away const-ness.
@@ -3562,6 +3644,11 @@
     // a bit less efficient than caching, but calls to
     // TypeAryPtr::OOPS->klass() are not common enough to matter.
     ((TypeAryPtr*)this)->_klass = k_ary;
+    if (UseCompressedOops && k_ary != NULL && k_ary->is_obj_array_klass() &&
+        _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes()) {
+      ((TypeAryPtr*)this)->_is_ptr_to_narrowoop = true;
+    }
+  }
   return k_ary;
 }
 
--- a/src/share/vm/opto/type.hpp	Tue May 20 06:32:58 2008 -0700
+++ b/src/share/vm/opto/type.hpp	Wed May 21 10:45:07 2008 -0700
@@ -191,9 +191,8 @@
   virtual const Type *filter( const Type *kills ) const;
 
   // Returns true if this pointer points at memory which contains a
-  // compressed oop references.  In 32-bit builds it's non-virtual
-  // since we don't support compressed oops at all in the mode.
-  LP64_ONLY(virtual) bool is_narrow() const { return false; }
+  // compressed oop reference.
+  bool is_ptr_to_narrowoop() const;
 
   // Convenience access
   float getf() const;
@@ -213,8 +212,8 @@
   const TypePtr    *isa_ptr() const;             // Returns NULL if not ptr type
   const TypeRawPtr *isa_rawptr() const;          // NOT Java oop
   const TypeRawPtr *is_rawptr() const;           // Asserts is rawptr
-  const TypeNarrowOop  *is_narrowoop() const;        // Java-style GC'd pointer
-  const TypeNarrowOop  *isa_narrowoop() const;       // Returns NULL if not oop ptr type
+  const TypeNarrowOop  *is_narrowoop() const;    // Java-style GC'd pointer
+  const TypeNarrowOop  *isa_narrowoop() const;   // Returns NULL if not oop ptr type
   const TypeOopPtr   *isa_oopptr() const;        // Returns NULL if not oop ptr type
   const TypeOopPtr   *is_oopptr() const;         // Java-style GC'd pointer
   const TypeKlassPtr *isa_klassptr() const;      // Returns NULL if not KlassPtr
@@ -643,7 +642,7 @@
 // Some kind of oop (Java pointer), either klass or instance or array.
 class TypeOopPtr : public TypePtr {
 protected:
-  TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) : TypePtr(t, ptr, offset), _const_oop(o), _klass(k), _klass_is_exact(xk), _instance_id(instance_id) { }
+  TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id );
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -660,8 +659,9 @@
   ciKlass*      _klass;       // Klass object
   // Does the type exclude subclasses of the klass?  (Inexact == polymorphic.)
   bool          _klass_is_exact;
+  bool          _is_ptr_to_narrowoop;
 
-  int          _instance_id;   // if not UNKNOWN_INSTANCE, indicates that this is a particular instance
+  int           _instance_id;  // if not UNKNOWN_INSTANCE, indicates that this is a particular instance
                               // of this type which is distinct.  This is the node index of the
                                // node creating this instance
 
@@ -696,6 +696,11 @@
   ciObject* const_oop()    const { return _const_oop; }
   virtual ciKlass* klass() const { return _klass;     }
   bool klass_is_exact()    const { return _klass_is_exact; }
+
+  // Returns true if this pointer points at memory which contains a
+  // compressed oop reference.
+  bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; }
+
   bool is_instance()       const { return _instance_id != UNKNOWN_INSTANCE; }
   uint instance_id()       const { return _instance_id; }
   bool is_instance_field() const { return _instance_id != UNKNOWN_INSTANCE && _offset >= 0; }
@@ -716,12 +721,6 @@
   // returns the equivalent compressed version of this pointer type
   virtual const TypeNarrowOop* make_narrowoop() const;
 
-#ifdef _LP64
-  virtual bool is_narrow() const {
-    return (UseCompressedOops && _offset != 0);
-  }
-#endif
-
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
@@ -843,15 +842,10 @@
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
-#ifdef _LP64
-  virtual bool is_narrow() const {
-    return (UseCompressedOops && klass() != NULL && _offset != 0);
-  }
-#endif
-
   // Convenience common pre-built types.
   static const TypeAryPtr *RANGE;
   static const TypeAryPtr *OOPS;
+  static const TypeAryPtr *NARROWOOPS;
   static const TypeAryPtr *BYTES;
   static const TypeAryPtr *SHORTS;
   static const TypeAryPtr *CHARS;
@@ -901,18 +895,6 @@
   virtual const Type    *xmeet( const Type *t ) const;
   virtual const Type    *xdual() const;      // Compute dual right now.
 
-#ifdef _LP64
-  // Perm objects don't use compressed references, except for static fields
-  // which are currently compressed
-  virtual bool is_narrow() const {
-    if (UseCompressedOops && _offset != 0 && _klass->is_instance_klass()) {
-      ciInstanceKlass* ik = _klass->as_instance_klass();
-      return ik != NULL && ik->get_field_by_offset(_offset, true) != NULL;
-    }
-    return false;
-  }
-#endif
-
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
@@ -921,7 +903,7 @@
 #endif
 };
 
-//------------------------------TypeNarrowOop----------------------------------------
+//------------------------------TypeNarrowOop----------------------------------
 // A compressed reference to some kind of Oop.  This type wraps around
 // a preexisting TypeOopPtr and forwards most of it's operations to
 // the underlying type.  It's only real purpose is to track the
@@ -1013,6 +995,14 @@
 };
 
 //------------------------------accessors--------------------------------------
+inline bool Type::is_ptr_to_narrowoop() const {
+#ifdef _LP64
+  return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowoop_nv());
+#else
+  return false;
+#endif
+}
+
 inline float Type::getf() const {
   assert( _base == FloatCon, "Not a FloatCon" );
   return ((TypeF*)this)->_f;
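
The old is_narrow() was an LP64-only virtual query recomputed at every call; it is replaced by a flag computed once in the TypeOopPtr constructor and read through a non-virtual inline front end (hence the _nv suffix), so non-oop types answer false with no virtual dispatch. A simplified sketch of the pattern, with the class bodies reduced to assumptions:

  struct TypeOopPtr;

  struct Type {
    virtual ~Type() {}
    virtual const TypeOopPtr* isa_oopptr() const { return nullptr; }
    inline bool is_ptr_to_narrowoop() const;   // non-virtual front end
  };

  struct TypeOopPtr : Type {
    bool _is_ptr_to_narrowoop;                 // decided once, at construction
    explicit TypeOopPtr(bool n) : _is_ptr_to_narrowoop(n) {}
    bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; }
    const TypeOopPtr* isa_oopptr() const override { return this; }
  };

  inline bool Type::is_ptr_to_narrowoop() const {
    const TypeOopPtr* p = isa_oopptr();
    return p != nullptr && p->is_ptr_to_narrowoop_nv();
  }
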
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6689060/Test.java	Wed May 21 10:45:07 2008 -0700
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+/*
+ * @test
+ * @bug 6689060
+ * @summary Escape Analysis does not work with Compressed Oops
+ * @run main/othervm -Xbatch -XX:CompileCommand=exclude,Test.dummy -XX:+AggressiveOpts Test
+ */
+
+import java.lang.reflect.Array;
+
+class Point {
+  int x;
+  int y;
+  Point next;
+  int ax[];
+  int ay[];
+  Point pax[];
+  Point pay[];
+  public Point getNext() {
+    return next;
+  }
+}
+
+public class Test {
+
+  void dummy() {
+    // Empty method to verify correctness of DebugInfo.
+    // Use -XX:CompileCommand=exclude,Test.dummy
+  }
+
+  int ival(int i) {
+    return i*2;
+  }
+
+  int test80(int y, int l, int i) {
+    Point p = new Point();
+    p.ax = new int[2];
+    p.ay = new int[2];
+    int x = 3;
+    p.ax[0] = x;
+    p.ay[1] = 3 * x + y;
+    dummy();
+    return p.ax[0] * p.ay[1];
+  }
+
+  int test81(int y, int l, int i) {
+    Point p = new Point();
+    p.ax = new int[2];
+    p.ay = new int[2];
+    int x = 3;
+    p.ax[0] = x;
+    p.ay[1] = 3 * x + y;
+    dummy();
+    return p.ax[0] * p.ay[1];
+  }
+
+
+  int test44(int y) {
+    Point p1 = new Point();
+    p1.x = ival(3);
+    dummy();
+    p1.y = 3 * p1.x + y;
+    return p1.y;
+  }
+
+  int test43(int y) {
+    Point p1 = new Point();
+    if ( (y & 1) == 1 ) {
+      p1.x = ival(3);
+    } else {
+      p1.x = ival(5);
+    }
+    dummy();
+    p1.y = 3 * p1.x + y;
+    return p1.y;
+  }
+
+  int test42(int y) {
+    Point p1 = new Point();
+    p1.x = 3;
+    for (int i = 0; i < y; i++) {
+      if ( (i & 1) == 1 ) {
+        p1.x += 4;
+      }
+    }
+    p1.y = 3 * y + p1.x;
+    return p1.y;
+  }
+
+  int test40(int y) {
+    Point p1 = new Point();
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+    } else {
+      p1.x = 5;
+    }
+    p1.y = 3 * p1.x + y;
+    return p1.y;
+  }
+
+  int test41(int y) {
+    Point p1 = new Point();
+    if ( (y & 1) == 1 ) {
+      p1.x += 4;
+    } else {
+      p1.x += 5;
+    }
+    p1.y = 3 * p1.x + y;
+    return p1.y;
+  }
+
+  Point test00(int y) {
+    int x = 3;
+    Point p = new Point();
+    p.x = x;
+    p.y = 3 * x + y;
+    return p;
+  }
+
+  Point test01(int y) {
+    int x = 3;
+    Point p = new Point();
+    p.x = x;
+    p.y = 3 * x + y;
+    dummy();
+    return p;
+  }
+
+  Point test02(int y) {
+    int x = 3;
+    Point p1 = null;
+    for (int i = 0; i < y; i++) {
+      Point p2 = new Point();
+      p2.x = x;
+      p2.y = 3 * y + x;
+      p2.next = p1;
+      p1 = p2;
+    }
+    return p1;
+  }
+
+  Point test03(int y) {
+    int x = 3;
+    Point p1 = null;
+    for (int i = 0; i < y; i++) {
+      Point p2 = new Point();
+      p2.x = x;
+      p2.y = 3 * y + x;
+      p2.next = p1;
+      p1 = p2;
+    }
+    dummy();
+    return p1;
+  }
+
+  Point test04(int y) {
+    int x = 3;
+    Point p1 = null;
+    for (int i = 0; i < y; i++) {
+      Point p2 = new Point();
+      p2.x = x;
+      p2.y = 3 * y + x;
+      p2.next = p1;
+      dummy();
+      p1 = p2;
+    }
+    return p1;
+  }
+
+  int test05(int y) {
+    int x = 3;
+    Point p1 = new Point();
+    for (int i = 0; i < y; i++) {
+      Point p2 = new Point();
+      p2.x = x;
+      p2.y = 3 * y + x;
+      p1.next = p2;
+      p1 = p2;
+    }
+    return p1.y;
+  }
+
+  int test0(int y) {
+    int x = 3;
+    Point p = new Point();
+    p.x = x;
+    p.y = 3 * x + y;
+    dummy();
+    return p.x * p.y;
+  }
+
+  int test1(int y) {
+    Point p = new Point();
+    if ( (y & 1) == 1 ) {
+      p = new Point(); // Kill previous
+    }
+    int x = 3;
+    p.x = x;
+    p.y = 3 * x + y;
+    dummy();
+    return p.x * p.y;
+  }
+
+  int test2(int y) {
+    Point p1 = new Point();
+    Point p2 = new Point();
+    p1.x = 3;
+    p2.x = 4;
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test3(int y, Point p1) {
+    Point p2 = new Point();
+    p1.x = 3;
+    p2.x = 4;
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test4(int y) {
+    Point p1 = new Point();
+    Point p2 = new Point();
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p2.x = 4;
+    } else {
+      p1.x = 5;
+      p2.x = 6;
+    }
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test5(int y, Point p1) {
+    Point p2 = new Point();
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p2.x = 4;
+    } else {
+      p1.x = 5;
+      p2.x = 6;
+    }
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test6(int y) {
+    Point p1 = new Point();
+    Point p2 = new Point();
+    p1.next = p2;
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p1.getNext().x = 4;
+    } else {
+      p1.x = 5;
+      p1.getNext().x = 6;
+    }
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test7(int y, Point p1) {
+    Point p2 = new Point();
+    p1.next = p2;
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p1.getNext().x = 4;
+    } else {
+      p1.x = 5;
+      p1.getNext().x = 6;
+    }
+    p1.y = 3 * p2.x + y;
+    p2.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p2.y;
+  }
+
+  int test8(int y, int l, int i) {
+    Point p = new Point();
+    p.ax = new int[l];
+    p.ay = new int[l];
+    int x = 3;
+    p.ax[i] = x;
+    p.ay[i] = 3 * x + y;
+    dummy();
+    return p.ax[i] * p.ay[i];
+  }
+
+  int test9(int y, int l, int i) {
+    Point p = new Point();
+    p.pax = new Point[l];
+    p.pay = new Point[l];
+    p.pax[i] = new Point();
+    p.pay[i] = new Point();
+    p.pax[i].x = 3;
+    p.pay[i].x = 4;
+    p.pax[i].y = 3 * p.pay[i].x + y;
+    p.pay[i].y = 3 * p.pax[i].x + y;
+    dummy();
+    return p.pax[i].y * p.pay[i].y;
+  }
+
+  int test10(int y, int l, int i, Class cls) {
+    Point p = new Point();
+    try {
+      p.pax = (Point[])Array.newInstance(cls, l);
+      p.pax[i] = (Point)cls.newInstance();
+    }
+    catch(java.lang.InstantiationException ex) {
+      return 0;
+    }
+    catch(java.lang.IllegalAccessException ex) {
+      return 0;
+    }
+    p.pax[i].x = 3;
+    p.pax[i].y = 3 * p.pax[i].x + y;
+    dummy();
+    return p.pax[i].x * p.pax[i].y;
+  }
+
+  int test11(int y) {
+    Point p1 = new Point();
+    Point p2 = new Point();
+    p1.next = p2;
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p1.next.x = 4;
+    } else {
+      p1.x = 5;
+      p1.next.x = 6;
+    }
+    p1.y = 3 * p1.next.x + y;
+    p1.next.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p1.next.y;
+  }
+
+  int test12(int y) {
+    Point p1 = new Point();
+    p1.next = p1;
+    if ( (y & 1) == 1 ) {
+      p1.x = 3;
+      p1.next.x = 4;
+    } else {
+      p1.x = 5;
+      p1.next.x = 6;
+    }
+    p1.y = 3 * p1.next.x + y;
+    p1.next.y = 3 * p1.x + y;
+    dummy();
+    return p1.y * p1.next.y;
+  }
+
+  public static void main(String args[]) {
+    Test tsr    = new Test();
+    Point p     = new Point();
+    Point ptmp  = p;
+    Class cls   = Point.class;
+    int y = 0;
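+    // Warm-up: with -Xbatch these loops get every test method compiled;
+    // the loop body is repeated three times, presumably to trigger
+    // recompilation as profiles mature.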
+    for (int i=0; i<10000; i++) {
+      ptmp.next = tsr.test00(1);
+      ptmp.next = tsr.test01(1);
+      ptmp.next = tsr.test02(1);
+      ptmp.next = tsr.test03(1);
+      ptmp.next = tsr.test04(1);
+
+      y = tsr.test05(1);
+
+      y = tsr.test80(y, 1, 0);
+      y = tsr.test81(y, 1, 0);
+
+      y = tsr.test44(y);
+      y = tsr.test43(y);
+      y = tsr.test42(y);
+      y = tsr.test40(y);
+      y = tsr.test41(y);
+
+      y = tsr.test0(y);
+      y = tsr.test1(y);
+      y = tsr.test2(y);
+      y = tsr.test3(y, p);
+      y = tsr.test4(y);
+      y = tsr.test5(y, p);
+      y = tsr.test6(y);
+      y = tsr.test7(y, p);
+      y = tsr.test8(y, 1, 0);
+      y = tsr.test9(y, 1, 0);
+      y = tsr.test10(y, 1, 0, cls);
+      y = tsr.test11(y);
+      y = tsr.test12(y);
+    }
+    for (int i=0; i<10000; i++) {
+      ptmp.next = tsr.test00(1);
+      ptmp.next = tsr.test01(1);
+      ptmp.next = tsr.test02(1);
+      ptmp.next = tsr.test03(1);
+      ptmp.next = tsr.test04(1);
+
+      y = tsr.test05(1);
+
+      y = tsr.test80(y, 1, 0);
+      y = tsr.test81(y, 1, 0);
+
+      y = tsr.test44(y);
+      y = tsr.test43(y);
+      y = tsr.test42(y);
+      y = tsr.test40(y);
+      y = tsr.test41(y);
+
+      y = tsr.test0(y);
+      y = tsr.test1(y);
+      y = tsr.test2(y);
+      y = tsr.test3(y, p);
+      y = tsr.test4(y);
+      y = tsr.test5(y, p);
+      y = tsr.test6(y);
+      y = tsr.test7(y, p);
+      y = tsr.test8(y, 1, 0);
+      y = tsr.test9(y, 1, 0);
+      y = tsr.test10(y, 1, 0, cls);
+      y = tsr.test11(y);
+      y = tsr.test12(y);
+    }
+    for (int i=0; i<10000; i++) {
+      ptmp.next = tsr.test00(1);
+      ptmp.next = tsr.test01(1);
+      ptmp.next = tsr.test02(1);
+      ptmp.next = tsr.test03(1);
+      ptmp.next = tsr.test04(1);
+
+      y = tsr.test05(1);
+
+      y = tsr.test80(y, 1, 0);
+      y = tsr.test81(y, 1, 0);
+
+      y = tsr.test44(y);
+      y = tsr.test43(y);
+      y = tsr.test42(y);
+      y = tsr.test40(y);
+      y = tsr.test41(y);
+
+      y = tsr.test0(y);
+      y = tsr.test1(y);
+      y = tsr.test2(y);
+      y = tsr.test3(y, p);
+      y = tsr.test4(y);
+      y = tsr.test5(y, p);
+      y = tsr.test6(y);
+      y = tsr.test7(y, p);
+      y = tsr.test8(y, 1, 0);
+      y = tsr.test9(y, 1, 0);
+      y = tsr.test10(y, 1, 0, cls);
+      y = tsr.test11(y);
+      y = tsr.test12(y);
+    }
+
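+    // Checked run: print each result so a miscompiled method shows up
+    // directly in the output.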
+    int z = 0;
+    y = tsr.test80(0, 1, 0);
+    z += y;
+    System.out.println("After 'test80' y=" + y);
+    y = tsr.test81(0, 1, 0);
+    z += y;
+    System.out.println("After 'test81' y=" + y);
+
+    y = tsr.test44(0);
+    z += y;
+    System.out.println("After 'test44' y=" + y);
+    y = tsr.test43(0);
+    z += y;
+    System.out.println("After 'test43' y=" + y);
+    y = tsr.test42(0);
+    z += y;
+    System.out.println("After 'test42' y=" + y);
+    y = tsr.test40(0);
+    z += y;
+    System.out.println("After 'test40' y=" + y);
+    y = tsr.test41(0);
+    z += y;
+    System.out.println("After 'test41' y=" + y);
+
+    ptmp.next = tsr.test00(1);
+    z += ptmp.next.y;
+    System.out.println("After 'test00' p.y=" + ptmp.next.y);
+    ptmp.next = tsr.test01(1);
+    z += ptmp.next.y;
+    System.out.println("After 'test01' p.y=" + ptmp.next.y);
+    ptmp.next = tsr.test02(1);
+    z += ptmp.next.y;
+    System.out.println("After 'test02' p.y=" + ptmp.next.y);
+    ptmp.next = tsr.test03(1);
+    z += ptmp.next.y;
+    System.out.println("After 'test03' p.y=" + ptmp.next.y);
+    ptmp.next = tsr.test04(1);
+    z += ptmp.next.y;
+    System.out.println("After 'test04' p.y=" + ptmp.next.y);
+
+    y = tsr.test05(1);
+    z += y;
+    System.out.println("After 'test05' y=" + y);
+
+    y = tsr.test0(0);
+    z += y;
+    System.out.println("After 'test0' y=" + y);
+    y = tsr.test1(0);
+    z += y;
+    System.out.println("After 'test1' y=" + y);
+    y = tsr.test2(0);
+    z += y;
+    System.out.println("After 'test2' y=" + y);
+    y = tsr.test3(0, new Point());
+    z += y;
+    System.out.println("After 'test3' y=" + y);
+    y = tsr.test4(0);
+    z += y;
+    System.out.println("After 'test4' y=" + y);
+    y = tsr.test5(0, new Point());
+    z += y;
+    System.out.println("After 'test5' y=" + y);
+    y = tsr.test6(0);
+    z += y;
+    System.out.println("After 'test6' y=" + y);
+    y = tsr.test7(0, new Point());
+    z += y;
+    System.out.println("After 'test7' y=" + y);
+    y = tsr.test8(0, 1, 0);
+    z += y;
+    System.out.println("After 'test8' y=" + y);
+    y = tsr.test9(0, 1, 0);
+    z += y;
+    System.out.println("After 'test9' y=" + y);
+    y = tsr.test10(0, 1, 0, cls);
+    z += y;
+    System.out.println("After 'test10' y=" + y);
+    y = tsr.test11(0);
+    z += y;
+    System.out.println("After 'test11' y=" + y);
+    y = tsr.test12(0);
+    z += y;
+    System.out.println("After 'test12' y=" + y);
+    System.out.println("Sum of y =" + z);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6695810/Test.java	Wed May 21 10:45:07 2008 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ */
+
+/*
+ * @test
+ * @bug 6695810
+ * @summary null oop passed to encode_heap_oop_not_null
+ * @run main/othervm -Xbatch Test
+ */
+
+public class Test {
+    Test _t;
+
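+    // The same null-guarded store appears twice on purpose: with
+    // compressed oops the compiled code could feed a null t2 to
+    // encode_heap_oop_not_null (see @summary).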
+    static void test(Test t1, Test t2) {
+        if (t2 != null)
+            t1._t = t2;
+
+        if (t2 != null)
+            t1._t = t2;
+    }
+
+    public static void main(String[] args) {
+        Test t = new Test();
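+        // Warm up with a non-null t2 so the store path is profiled as
+        // taken, then pass null each outer iteration to hit the compiled
+        // code with t2 == null.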
+        for (int i = 0; i < 50; i++) {
+            for (int j = 0; j < 100; j++) {
+                test(t, t);
+            }
+            test(t, null);
+        }
+        for (int i = 0; i < 10000; i++) {
+            test(t, t);
+        }
+        test(t, null);
+    }
+}