changeset 36:f34d9da7acb2

6667618: disable LoadL->ConvL2I ==> LoadI optimization
Summary: this optimization causes problems (sizes of Load and Store nodes do not match) for object initialization code and Escape Analysis
Reviewed-by: jrose, never
author kvn
date Fri, 29 Feb 2008 19:57:41 -0800
parents e2ae28d2ce91
children 73970d8c0b27
files src/share/vm/opto/connode.cpp src/share/vm/opto/memnode.cpp
diffstat 2 files changed, 10 insertions(+), 41 deletions(-) [+]
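
As background for the summary above, here is a minimal Java sketch (not part of the changeset; the class and method names are invented) of the kind of code whose ideal graph contains the LoadL->ConvL2I pair that the now-disabled transform rewrote into a single LoadI on little-endian targets:

// Hypothetical illustration only -- not HotSpot or JDK source.
class LowWord {
    long value;

    int lowHalf() {
        // The field read becomes a LoadL node and the narrowing cast a
        // ConvL2I node in C2's ideal graph. Before this change, for
        // non-volatile fields on little-endian machines, the pair could be
        // folded into one LoadI reading the low 32 bits of 'value'.
        return (int) value;
    }
}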
--- a/src/share/vm/opto/connode.cpp	Fri Feb 29 19:07:15 2008 -0800
+++ b/src/share/vm/opto/connode.cpp	Fri Feb 29 19:57:41 2008 -0800
@@ -982,34 +982,9 @@
     return new (phase->C, 3) AddINode(add1,add2);
   }
 
-  // Fold up with a prior LoadL: LoadL->ConvL2I ==> LoadI
-  // Requires we understand the 'endianess' of Longs.
-  if( andl_op == Op_LoadL ) {
-    Node *adr = andl->in(MemNode::Address);
-    // VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-#ifndef VM_LITTLE_ENDIAN
-    // The transformation can cause problems on BIG_ENDIAN architectures
-    // where the jint is not the same address as the jlong. Specifically, we
-    // will fail to insert an anti-dependence in GCM between the LoadI and a
-    // subsequent StoreL because different memory offsets provoke
-    // flatten_alias_type() into indicating two different types.  See bug
-    // 4755222.
-
-    // Node *base = adr->is_AddP() ? adr->in(AddPNode::Base) : adr;
-    // adr = phase->transform( new (phase->C, 4) AddPNode(base,adr,phase->MakeConX(sizeof(jint))));
-    return NULL;
-#else
-    if (phase->C->alias_type(andl->adr_type())->is_volatile()) {
-      // Picking up the low half by itself bypasses the atomic load and we could
-      // end up with more than one non-atomic load.  See bugs 4432655 and 4526490.
-      // We could go to the trouble of iterating over andl's output edges and
-      // punting only if there's more than one real use, but we don't bother.
-      return NULL;
-    }
-    return new (phase->C, 3) LoadINode(andl->in(MemNode::Control),andl->in(MemNode::Memory),adr,((LoadLNode*)andl)->raw_adr_type());
-#endif
-  }
-
+  // Disable optimization: LoadL->ConvL2I ==> LoadI.
+  // It causes problems (sizes of Load and Store nodes do not match)
+  // in object initialization code and Escape Analysis.
   return NULL;
 }
 
--- a/src/share/vm/opto/memnode.cpp	Fri Feb 29 19:07:15 2008 -0800
+++ b/src/share/vm/opto/memnode.cpp	Fri Feb 29 19:57:41 2008 -0800
@@ -108,19 +108,13 @@
   // Avoid independent memory operations
   Node* old_mem = mem;
 
-  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
-    InitializeNode* init = mem->in(0)->as_Initialize();
-    if (init->is_complete()) {  // i.e., after macro expansion
-      const TypePtr* tp = t_adr->is_ptr();
-      uint alias_idx = phase->C->get_alias_index(tp);
-      // Free this slice from the init.  It was hooked, temporarily,
-      // by GraphKit::set_output_for_allocation.
-      if (alias_idx > Compile::AliasIdxRaw) {
-        mem = init->memory(alias_idx);
-        // ...but not with the raw-pointer slice.
-      }
-    }
-  }
+  // The code which unhooks non-raw memories from complete (macro-expanded)
+  // initializations was removed. After macro expansion all stores captured
+  // by the Initialize node become raw stores, and there is no information
+  // about which memory slices they modify, so it is unsafe to move any
+  // memory operation above these stores. In most cases the hooked non-raw
+  // memories were already unhooked using information from
+  // detect_ptr_independence() and find_previous_store().
 
   if (mem->is_MergeMem()) {
     MergeMemNode* mmem = mem->as_MergeMem();
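
For context on the "object initialization code" mentioned in the new comment, here is a hypothetical Java-level sketch (invented names, not from this changeset) of an allocation whose constructor stores are captured by the allocation's Initialize node:

// Hypothetical illustration only -- not HotSpot or JDK source.
class Point {
    final int x;
    final int y;

    Point(int x, int y) {
        this.x = x;   // initializing store captured by the Initialize node
        this.y = y;   // initializing store captured by the Initialize node
    }

    static int useX() {
        Point p = new Point(1, 2);
        // After macro expansion the captured stores become raw stores with no
        // per-field alias information, so this load must not be scheduled
        // above them.
        return p.x;
    }
}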