changeset 20637:3a8a0fd171c5 hs25.40-b19

Merge
author amurillo
date Fri, 14 Nov 2014 07:37:00 -0800
parents 75ef834238df (current diff) 86c674274c94 (diff)
children 2bd263bb3a75
files test/runtime/NMT/UnsafeMallocLimit.java test/runtime/NMT/UnsafeMallocLimit2.java
diffstat 28 files changed, 295 insertions(+), 342 deletions(-)
--- a/make/hotspot_version	Wed Nov 12 13:47:19 2014 -0800
+++ b/make/hotspot_version	Fri Nov 14 07:37:00 2014 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=18
+HS_BUILD_NUMBER=19
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/windows/makefiles/compile.make	Wed Nov 12 13:47:19 2014 -0800
+++ b/make/windows/makefiles/compile.make	Fri Nov 14 07:37:00 2014 -0800
@@ -268,7 +268,7 @@
 !endif
 LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
  comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
- uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
+ uuid.lib Wsock32.lib winmm.lib version.lib /nologo /machine:$(MACHINE) /opt:REF \
  /opt:ICF,8
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
 LD_FLAGS= $(LD_FLAGS) /map /debug
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -1769,7 +1769,7 @@
     // at [FETCH], below, will never observe a biased encoding (*101b).
     // If this invariant is not held we risk exclusion (safety) failure.
     if (UseBiasedLocking && !UseOptoBiasInlining) {
-      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
     }
 
 #if INCLUDE_RTM_OPT
--- a/src/os/windows/vm/os_windows.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -1650,96 +1650,123 @@
 
 void os::win32::print_windows_version(outputStream* st) {
   OSVERSIONINFOEX osvi;
-  SYSTEM_INFO si;
-
+  VS_FIXEDFILEINFO *file_info;
+  TCHAR kernel32_path[MAX_PATH];
+  UINT len, ret;
+
+  // Use the GetVersionEx information to see if we're on a server or
+  // workstation edition of Windows. Starting with Windows 8.1 we can't
+  // trust the OS version information returned by this API.
   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-
   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
-    st->print_cr("N/A");
+    st->print_cr("Call to GetVersionEx failed");
+    return;
+  }
+  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
+
+  // Get the full path to \Windows\System32\kernel32.dll and use that for
+  // determining what version of Windows we're running on.
+  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
+  ret = GetSystemDirectory(kernel32_path, len);
+  if (ret == 0 || ret > len) {
+    st->print_cr("Call to GetSystemDirectory failed");
+    return;
+  }
+  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
+
+  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
+  if (version_size == 0) {
+    st->print_cr("Call to GetFileVersionInfoSize failed");
+    return;
+  }
+
+  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
+  if (version_info == NULL) {
+    st->print_cr("Failed to allocate version_info");
     return;
   }
 
-  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
-
-  ZeroMemory(&si, sizeof(SYSTEM_INFO));
-  if (os_vers >= 5002) {
-    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
-    // find out whether we are running on 64 bit processor or not.
-    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
-      os::Kernel32Dll::GetNativeSystemInfo(&si);
+  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
+    os::free(version_info);
+    st->print_cr("Call to GetFileVersionInfo failed");
+    return;
+  }
+
+  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
+    os::free(version_info);
+    st->print_cr("Call to VerQueryValue failed");
+    return;
+  }
+
+  int major_version = HIWORD(file_info->dwProductVersionMS);
+  int minor_version = LOWORD(file_info->dwProductVersionMS);
+  int build_number = HIWORD(file_info->dwProductVersionLS);
+  int build_minor = LOWORD(file_info->dwProductVersionLS);
+  int os_vers = major_version * 1000 + minor_version;
+  os::free(version_info);
+
+  st->print(" Windows ");
+  switch (os_vers) {
+
+  case 6000:
+    if (is_workstation) {
+      st->print("Vista");
     } else {
-      GetSystemInfo(&si);
+      st->print("Server 2008");
     }
-  }
-
-  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
-    switch (os_vers) {
-    case 3051: st->print(" Windows NT 3.51"); break;
-    case 4000: st->print(" Windows NT 4.0"); break;
-    case 5000: st->print(" Windows 2000"); break;
-    case 5001: st->print(" Windows XP"); break;
-    case 5002:
-      if (osvi.wProductType == VER_NT_WORKSTATION &&
-          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
-        st->print(" Windows XP x64 Edition");
-      } else {
-        st->print(" Windows Server 2003 family");
-      }
-      break;
-
-    case 6000:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows Vista");
-      } else {
-        st->print(" Windows Server 2008");
-      }
-      break;
-
-    case 6001:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 7");
-      } else {
-        st->print(" Windows Server 2008 R2");
-      }
-      break;
-
-    case 6002:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 8");
-      } else {
-        st->print(" Windows Server 2012");
-      }
-      break;
-
-    case 6003:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 8.1");
-      } else {
-        st->print(" Windows Server 2012 R2");
-      }
-      break;
-
-    default: // future os
-      // Unrecognized windows, print out its major and minor versions
-      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+    break;
+
+  case 6001:
+    if (is_workstation) {
+      st->print("7");
+    } else {
+      st->print("Server 2008 R2");
+    }
+    break;
+
+  case 6002:
+    if (is_workstation) {
+      st->print("8");
+    } else {
+      st->print("Server 2012");
+    }
+    break;
+
+  case 6003:
+    if (is_workstation) {
+      st->print("8.1");
+    } else {
+      st->print("Server 2012 R2");
     }
-  } else {
-    switch (os_vers) {
-    case 4000: st->print(" Windows 95"); break;
-    case 4010: st->print(" Windows 98"); break;
-    case 4090: st->print(" Windows Me"); break;
-    default: // future windows, print out its major and minor versions
-      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+    break;
+
+  case 6004:
+    if (is_workstation) {
+      st->print("10");
+    } else {
+      // The server version name of Windows 10 is not known at this time
+      st->print("%d.%d", major_version, minor_version);
     }
-  }
-
-  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+    break;
+
+  default:
+    // Unrecognized windows, print out its major and minor versions
+    st->print("%d.%d", major_version, minor_version);
+    break;
+  }
+
+  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
+  // find out whether we are running on 64 bit processor or not
+  SYSTEM_INFO si;
+  ZeroMemory(&si, sizeof(SYSTEM_INFO));
+  os::Kernel32Dll::GetNativeSystemInfo(&si);
+  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
     st->print(" , 64 bit");
   }
 
-  st->print(" Build %d", osvi.dwBuildNumber);
-  st->print(" %s", osvi.szCSDVersion);           // service pack
+  st->print(" Build %d", build_number);
+  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
   st->cr();
 }
 
@@ -5350,11 +5377,6 @@
   return ::Module32Next(hSnapshot, lpme);
 }
 
-
-inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
-  return true;
-}
-
 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
   ::GetNativeSystemInfo(lpSystemInfo);
 }
--- a/src/os/windows/vm/os_windows.hpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/os/windows/vm/os_windows.hpp	Fri Nov 14 07:37:00 2014 -0800
@@ -192,7 +192,6 @@
   static BOOL Module32First(HANDLE,LPMODULEENTRY32);
   static BOOL Module32Next(HANDLE,LPMODULEENTRY32);
 
-  static BOOL GetNativeSystemInfoAvailable();
   static void GetNativeSystemInfo(LPSYSTEM_INFO);
 
   // NUMA calls
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Fri Nov 14 07:37:00 2014 -0800
@@ -398,7 +398,7 @@
                 "/export:JVM_GetThreadStateNames "+
                 "/export:JVM_GetThreadStateValues "+
                 "/export:JVM_InitAgentProperties");
-        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib");
+        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib");
         addAttr(rv, "OutputFile", outDll);
         addAttr(rv, "SuppressStartupBanner", "true");
         addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -2641,7 +2641,7 @@
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -5228,7 +5228,9 @@
   }
 
   void pre_work_verification() {
-    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
+    assert(Thread::current()->is_VM_thread()
+           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
   }
 
   void post_work_verification() {
--- a/src/share/vm/opto/doCall.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/doCall.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -791,7 +791,7 @@
   Node* ex_klass_node = NULL;
   if (has_ex_handler() && !ex_type->klass_is_exact()) {
     Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
-    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
 
     // Compute the exception klass a little more cleverly.
     // Obvious solution is to simple do a LoadKlass from the 'ex_node'.
@@ -809,7 +809,7 @@
           continue;
         }
         Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
-        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
         ex_klass_node->init_req( i, k );
       }
       _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
--- a/src/share/vm/opto/graphKit.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/graphKit.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -1150,7 +1150,7 @@
   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
   if (akls != NULL)  return akls;
   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-  return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
+  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 }
 
 //-------------------------load_array_length-----------------------------------
@@ -2542,7 +2542,7 @@
   // cache which is mutable so can't use immutable memory.  Other
   // types load from the super-class display table which is immutable.
   Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
-  Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
+  Node* nkls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
 
   // Compile speed common case: ARE a subtype and we canNOT fail
   if( superklass == nkls )
--- a/src/share/vm/opto/library_call.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/library_call.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -3398,7 +3398,7 @@
   if (region == NULL)  never_see_null = true;
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
-  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3574,7 +3574,7 @@
       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
     // If we fall through, it's a plain class.  Get its _super.
     p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
-    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
     null_ctl = top();
     kls = null_check_oop(kls, &null_ctl);
     if (null_ctl != top()) {
@@ -3656,7 +3656,7 @@
     args[which_arg] = arg;
 
     Node* p = basic_plus_adr(arg, class_klass_offset);
-    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
     klasses[which_arg] = _gvn.transform(kls);
   }
 
@@ -5172,7 +5172,7 @@
       // (At this point we can assume disjoint_bases, since types differ.)
       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-      Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
+      Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
       Node* dest_elem_klass = _gvn.transform(n1);
       Node* cv = generate_checkcast_arraycopy(adr_type,
                                               dest_elem_klass,
--- a/src/share/vm/opto/macro.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/macro.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -2194,7 +2194,7 @@
     Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
     if (klass_node == NULL) {
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+      klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
 #ifdef _LP64
       if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
--- a/src/share/vm/opto/memnode.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/memnode.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -859,6 +859,10 @@
 
 
 //=============================================================================
+// Should LoadNode::Ideal() attempt to remove control edges?
+bool LoadNode::can_remove_control() const {
+  return true;
+}
 uint LoadNode::size_of() const { return sizeof(*this); }
 uint LoadNode::cmp( const Node &n ) const
 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
@@ -1455,7 +1459,7 @@
 }
 
 //------------------------------Ideal------------------------------------------
-// If the load is from Field memory and the pointer is non-null, we can
+// If the load is from Field memory and the pointer is non-null, it might be possible to
 // zero out the control input.
 // If the offset is constant and the base is an object allocation,
 // try to hook me up to the exact initializing store.
@@ -1480,6 +1484,7 @@
       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
     // Check for useless control edge in some common special cases
     if (in(MemNode::Control) != NULL
+        && can_remove_control()
         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
@@ -2007,9 +2012,8 @@
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
-Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
+Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk) {
   Compile* C = gvn.C;
-  Node *ctl = NULL;
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
   assert(adr_type != NULL, "expecting TypeKlassPtr");
@@ -2029,6 +2033,12 @@
   return klass_value_common(phase);
 }
 
+// In most cases, LoadKlassNode does not have the control input set. If the control
+// input is set, it must not be removed (by LoadNode::Ideal()).
+bool LoadKlassNode::can_remove_control() const {
+  return false;
+}
+
 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
   const Type *t1 = phase->type( in(MemNode::Memory) );
--- a/src/share/vm/opto/memnode.hpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/memnode.hpp	Fri Nov 14 07:37:00 2014 -0800
@@ -148,6 +148,8 @@
 protected:
   virtual uint cmp(const Node &n) const;
   virtual uint size_of() const; // Size is bigger
+  // Should LoadNode::Ideal() attempt to remove control edges?
+  virtual bool can_remove_control() const;
   const Type* const _type;      // What kind of value is loaded?
 public:
 
@@ -171,8 +173,10 @@
   // we are equivalent to.  We look for Load of a Store.
   virtual Node *Identity( PhaseTransform *phase );
 
-  // If the load is from Field memory and the pointer is non-null, we can
+  // If the load is from Field memory and the pointer is non-null, it might be possible to
   // zero out the control input.
+  // If the offset is constant and the base is an object allocation,
+  // try to hook me up to the exact initializing store.
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 
   // Split instance field load through Phi.
@@ -413,6 +417,10 @@
 //------------------------------LoadKlassNode----------------------------------
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
+protected:
+  // In most cases, LoadKlassNode does not have the control input set. If the control
+  // input is set, it must not be removed (by LoadNode::Ideal()).
+  virtual bool can_remove_control() const;
 public:
   LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
     : LoadPNode(c, mem, adr, at, tk, mo) {}
@@ -422,8 +430,8 @@
   virtual bool depends_only_on_test() const { return true; }
 
   // Polymorphic factory method:
-  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
-                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
+  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 };
 
 //------------------------------LoadNKlassNode---------------------------------
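The control-edge hook introduced here can be reduced to a small standalone sketch: the base load asks a virtual predicate before clearing its control input, and the klass load vetoes the removal. Class and member names below are illustrative stand-ins, not the real HotSpot node classes.

    // Hypothetical sketch of the can_remove_control() pattern.
    struct Load {
      void* control = nullptr;
      virtual ~Load() {}
      // Should the Ideal()-style optimization be allowed to drop the control edge?
      virtual bool can_remove_control() const { return true; }
      void idealize() {
        // ... assume the address was proven method-invariant and non-null ...
        if (control != nullptr && can_remove_control()) {
          control = nullptr;   // the load no longer has to stay below its null check
        }
      }
    };

    struct LoadKlass : Load {
      // A klass load normally has no control input; when one is set explicitly
      // (as the array-store check in parseHelper.cpp now does), it must be kept.
      virtual bool can_remove_control() const { return false; }
    };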
--- a/src/share/vm/opto/parse1.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/parse1.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -1958,7 +1958,7 @@
   // finalization.  In general this will fold up since the concrete
   // class is often visible so the access flags are constant.
   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
-  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
+  Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
 
   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
--- a/src/share/vm/opto/parseHelper.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/opto/parseHelper.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -156,22 +156,43 @@
   int klass_offset = oopDesc::klass_offset_in_bytes();
   Node* p = basic_plus_adr( ary, ary, klass_offset );
   // p's type is array-of-OOPS plus klass_offset
-  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
+  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
   // Get the array klass
   const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
 
-  // array_klass's type is generally INexact array-of-oop.  Heroically
-  // cast the array klass to EXACT array and uncommon-trap if the cast
-  // fails.
+  // The type of array_klass is usually INexact array-of-oop.  Heroically
+  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
+  // Make constant out of the inexact array klass, but use it only if the cast
+  // succeeds.
   bool always_see_exact_class = false;
   if (MonomorphicArrayCheck
-      && !too_many_traps(Deoptimization::Reason_array_check)) {
+      && !too_many_traps(Deoptimization::Reason_array_check)
+      && !tak->klass_is_exact()
+      && tak != TypeKlassPtr::OBJECT) {
+      // Regarding the fourth condition in the if-statement from above:
+      //
+      // If the compiler has determined that the type of array 'ary' (represented
+      // by 'array_klass') is java/lang/Object, the compiler must not assume that
+      // the array 'ary' is monomorphic.
+      //
+      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
+      // because it is not possible to perform an arraystore into an object that is not
+      // a "proper" array.
+      //
+      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
+      // successfully perform the store.
+      //
+      // The implementation reasons for the condition are the following:
+      //
+      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
+      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
+      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
+      //
+      // See issue JDK-8057622 for details.
+
     always_see_exact_class = true;
     // (If no MDO at all, hope for the best, until a trap actually occurs.)
-  }
 
-  // Is the array klass is exactly its defined type?
-  if (always_see_exact_class && !tak->klass_is_exact()) {
     // Make a constant out of the inexact array klass
     const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
     Node* con = makecon(extak);
@@ -202,11 +223,15 @@
   // Extract the array element class
   int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
   Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
-  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
+  // We are allowed to use the constant type only if the cast succeeded. If always_see_exact_class is true,
+  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
+  // LoadKlassNode.
+  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
+                                                       immutable_memory(), p2, tak));
 
   // Check (the hard way) and throw if not a subklass.
   // Result is ignored, we just need the CFG effects.
-  gen_checkcast( obj, a_e_klass );
+  gen_checkcast(obj, a_e_klass);
 }
 
 
--- a/src/share/vm/prims/whitebox.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/prims/whitebox.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -300,7 +300,7 @@
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
-    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
+  addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
   return addr;
 WB_END
 
@@ -309,7 +309,7 @@
 WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
   address pc = (address)(size_t)pseudo_stack;
   NativeCallStack stack(&pc, 1);
-  return (jlong)os::malloc(size, mtTest, stack);
+  return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
 WB_END
 
 // Free the memory allocated by NMTAllocTest
@@ -344,15 +344,6 @@
   return MemTracker::tracking_level() == NMT_detail;
 WB_END
 
-WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
-  address pc = (address)1;
-  for (jlong index = 0; index < num; index ++) {
-    NativeCallStack stack(&pc, 1);
-    os::malloc(0, mtTest, stack);
-    pc += MallocSiteTable::hash_buckets();
-  }
-WB_END
-
 WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
   // Test that we can downgrade NMT levels but not upgrade them.
   if (MemTracker::tracking_level() == NMT_off) {
@@ -383,6 +374,12 @@
     return MemTracker::tracking_level() == NMT_minimal;
   }
 WB_END
+
+WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
+  int hash_size = MallocSiteTable::hash_buckets();
+  assert(hash_size > 0, "NMT hash_size should be > 0");
+  return (jint)hash_size;
+WB_END
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -981,9 +978,9 @@
   {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
-  {CC"NMTOverflowHashBucket", CC"(J)V",               (void*)&WB_NMTOverflowHashBucket},
   {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
   {CC"NMTChangeTrackingLevel", CC"()Z",               (void*)&WB_NMTChangeTrackingLevel},
+  {CC"NMTGetHashSize",      CC"()I",                  (void*)&WB_NMTGetHashSize     },
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
   {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
--- a/src/share/vm/runtime/os.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/runtime/os.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -571,17 +571,6 @@
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
 
-#if INCLUDE_NMT
-  // NMT can not track malloc allocation size > MAX_MALLOC_SIZE, which is
-  // (1GB - 1) on 32-bit system. It is not an issue on 64-bit system, where
-  // MAX_MALLOC_SIZE = ((1 << 62) - 1).
-  // VM code does not have such large malloc allocation. However, it can come
-  // Unsafe call.
-  if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
-    return NULL;
-  }
-#endif
-
 #ifdef ASSERT
   // checking for the WatcherThread and crash_protection first
   // since os::malloc can be called when the libjvm.{dll,so} is
@@ -652,12 +641,6 @@
 }
 
 void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
-#if INCLUDE_NMT
-  // See comments in os::malloc() above
-  if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
-    return NULL;
-  }
-#endif
 
 #ifndef ASSERT
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
--- a/src/share/vm/services/mallocTracker.cpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/services/mallocTracker.cpp	Fri Nov 14 07:37:00 2014 -0800
@@ -72,7 +72,7 @@
 
   MallocMemorySummary::record_free(size(), flags());
   MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
-  if (tracking_level() == NMT_detail) {
+  if (MemTracker::tracking_level() == NMT_detail) {
     MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
   }
 }
@@ -128,36 +128,18 @@
   }
 
   // Uses placement global new operator to initialize malloc header
-  switch(level) {
-    case NMT_off:
-      return malloc_base;
-    case NMT_minimal: {
-      MallocHeader* hdr = ::new (malloc_base) MallocHeader();
-      break;
-    }
-    case NMT_summary: {
-      assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
-      header = ::new (malloc_base) MallocHeader(size, flags);
-      break;
-    }
-    case NMT_detail: {
-      assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
-      header = ::new (malloc_base) MallocHeader(size, flags, stack);
-      break;
-    }
-    default:
-      ShouldNotReachHere();
+
+  if (level == NMT_off) {
+    return malloc_base;
   }
+
+  header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
   memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
 
   // The alignment check: 8 bytes alignment for 32 bit systems.
   //                      16 bytes alignment for 64-bit systems.
   assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
 
-  // Sanity check
-  assert(get_memory_tracking_level(memblock) == level,
-    "Wrong tracking level");
-
 #ifdef ASSERT
   if (level > NMT_minimal) {
     // Read back
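The collapsed constructor call above boils down to a placement-new idiom: one raw allocation is split into a header constructed in place, followed by the payload returned to the caller. The sketch below only illustrates that idiom; the header fields and bookkeeping are assumptions, not the NMT definitions.

    // Hypothetical sketch, not NMT code: prepend a header to a malloc'd block.
    #include <cstdlib>
    #include <new>

    struct Header { size_t size; unsigned flags; };

    void* tracked_malloc(size_t size, unsigned flags) {
      char* base = (char*)std::malloc(size + sizeof(Header));
      if (base == NULL) return NULL;
      ::new (base) Header{size, flags};   // construct the header in the raw block
      return base + sizeof(Header);       // hand only the payload back to the caller
    }

    void tracked_free(void* p) {
      if (p != NULL) std::free((char*)p - sizeof(Header));
    }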
--- a/src/share/vm/services/mallocTracker.hpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/services/mallocTracker.hpp	Fri Nov 14 07:37:00 2014 -0800
@@ -239,68 +239,46 @@
 
 class MallocHeader VALUE_OBJ_CLASS_SPEC {
 #ifdef _LP64
-  size_t           _size      : 62;
-  size_t           _level     : 2;
+  size_t           _size      : 64;
   size_t           _flags     : 8;
   size_t           _pos_idx   : 16;
   size_t           _bucket_idx: 40;
 #define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
 #define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
-#define MAX_MALLOC_SIZE           (((size_t)1 << 62) - 1)
 #else
-  size_t           _size      : 30;
-  size_t           _level     : 2;
+  size_t           _size      : 32;
   size_t           _flags     : 8;
   size_t           _pos_idx   : 8;
   size_t           _bucket_idx: 16;
 #define MAX_MALLOCSITE_TABLE_SIZE  ((size_t)(1 << 16))
 #define MAX_BUCKET_LENGTH          ((size_t)(1 << 8))
-// Max malloc size = 1GB - 1 on 32 bit system, such has total 4GB memory
-#define MAX_MALLOC_SIZE            ((size_t)(1 << 30) - 1)
 #endif  // _LP64
 
  public:
-  // Summary tracking header
-  MallocHeader(size_t size, MEMFLAGS flags) {
-    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
-      "Wrong header size");
-
-    _level = NMT_summary;
-    _flags = flags;
-    set_size(size);
-    MallocMemorySummary::record_malloc(size, flags);
-    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
-  }
-  // Detail tracking header
-  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
+  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
     assert(sizeof(MallocHeader) == sizeof(void*) * 2,
       "Wrong header size");
 
-    _level = NMT_detail;
+    if (level == NMT_minimal) {
+      return;
+    }
+
     _flags = flags;
     set_size(size);
-    size_t bucket_idx;
-    size_t pos_idx;
-    if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
-      assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
-      assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
-      _bucket_idx = bucket_idx;
-      _pos_idx = pos_idx;
+    if (level == NMT_detail) {
+      size_t bucket_idx;
+      size_t pos_idx;
+      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
+        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
+        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
+        _bucket_idx = bucket_idx;
+        _pos_idx = pos_idx;
+      }
     }
+
     MallocMemorySummary::record_malloc(size, flags);
     MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
   }
-  // Minimal tracking header
-  MallocHeader() {
-    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
-      "Wrong header size");
-
-    _level = (unsigned short)NMT_minimal;
-  }
-
-  inline NMT_TrackingLevel tracking_level() const {
-    return (NMT_TrackingLevel)_level;
-  }
 
   inline size_t   size()  const { return _size; }
   inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
@@ -311,7 +289,6 @@
 
  private:
   inline void set_size(size_t size) {
-    assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
     _size = size;
   }
   bool record_malloc_site(const NativeCallStack& stack, size_t size,
@@ -347,10 +324,6 @@
   // Record free on specified memory block
   static void* record_free(void* memblock);
 
-  // Get tracking level of specified memory block
-  static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
-
-
   // Offset memory address to header address
   static inline void* get_base(void* memblock);
   static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
@@ -361,16 +334,12 @@
   // Get memory size
   static inline size_t get_size(void* memblock) {
     MallocHeader* header = malloc_header(memblock);
-    assert(header->tracking_level() >= NMT_summary,
-      "Wrong tracking level");
     return header->size();
   }
 
   // Get memory type
   static inline MEMFLAGS get_flags(void* memblock) {
     MallocHeader* header = malloc_header(memblock);
-    assert(header->tracking_level() >= NMT_summary,
-      "Wrong tracking level");
     return header->flags();
   }
 
@@ -394,7 +363,6 @@
   static inline MallocHeader* malloc_header(void *memblock) {
     assert(memblock != NULL, "NULL pointer");
     MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
-    assert(header->tracking_level() >= NMT_minimal, "Bad header");
     return header;
   }
 };
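With _level gone, the LP64 header above packs 64 + (8 + 16 + 40) bits into exactly two machine words, which is the invariant the "Wrong header size" assert checks. A throwaway sketch of that layout (the struct name is made up; it assumes a C++11 compiler on an LP64 target):

    #include <cstddef>

    struct PackedHeader {        // field widths copied from the new LP64 layout
      size_t size       : 64;
      size_t flags      : 8;
      size_t pos_idx    : 16;
      size_t bucket_idx : 40;
    };
    static_assert(sizeof(PackedHeader) == 2 * sizeof(void*),
                  "header stays two words, preserving the payload alignment");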
--- a/src/share/vm/services/mallocTracker.inline.hpp	Wed Nov 12 13:47:19 2014 -0800
+++ b/src/share/vm/services/mallocTracker.inline.hpp	Fri Nov 14 07:37:00 2014 -0800
@@ -28,13 +28,6 @@
 #include "services/mallocTracker.hpp"
 #include "services/memTracker.hpp"
 
-inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
-  assert(memblock != NULL, "Sanity check");
-  if (MemTracker::tracking_level() == NMT_off) return NMT_off;
-  MallocHeader* header = malloc_header(memblock);
-  return header->tracking_level();
-}
-
 inline void* MallocTracker::get_base(void* memblock){
   return get_base(memblock, MemTracker::tracking_level());
 }
--- a/test/TEST.ROOT	Wed Nov 12 13:47:19 2014 -0800
+++ b/test/TEST.ROOT	Fri Nov 14 07:37:00 2014 -0800
@@ -30,3 +30,4 @@
 keys=cte_test jcmd nmt regression gc stress
 
 groups=TEST.groups [closed/TEST.groups]
+requires.properties=sun.arch.data.model
--- a/test/TEST.groups	Wed Nov 12 13:47:19 2014 -0800
+++ b/test/TEST.groups	Fri Nov 14 07:37:00 2014 -0800
@@ -90,7 +90,6 @@
   runtime/NMT/SummarySanityCheck.java \
   runtime/NMT/ThreadedMallocTestType.java \
   runtime/NMT/ThreadedVirtualAllocTestType.java \
-  runtime/NMT/UnsafeMallocLimit.java \
   runtime/NMT/VirtualAllocCommitUncommitRecommit.java \
   runtime/NMT/VirtualAllocTestType.java \
   runtime/RedefineObject/TestRedefineObject.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/concurrentMarkSweep/DisableResizePLAB.java	Fri Nov 14 07:37:00 2014 -0800
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test DisableResizePLAB
+ * @key gc
+ * @bug 8060467
+ * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
+ */
+
+public class DisableResizePLAB {
+    public static void main(String args[]) throws Exception {
+        Object garbage[] = new Object[1_000];
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[0];
+        }
+        long startTime = System.currentTimeMillis();
+        while (System.currentTimeMillis() - startTime < 10_000) {
+            Object o = new byte[1024];
+        }
+    }
+}
--- a/test/runtime/NMT/MallocSiteHashOverflow.java	Wed Nov 12 13:47:19 2014 -0800
+++ b/test/runtime/NMT/MallocSiteHashOverflow.java	Fri Nov 14 07:37:00 2014 -0800
@@ -24,41 +24,56 @@
 /*
  * @test
  * @summary Test corner case that overflows malloc site hashtable bucket
+ * @requires sun.arch.data.model == "32"
  * @key nmt jcmd stress
  * @library /testlibrary /testlibrary/whitebox
- * @ignore - This test is disabled since it will stress NMT and timeout during normal testing
+ * @ignore 8062870
  * @build MallocSiteHashOverflow
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=480 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
  */
 
 import com.oracle.java.testlibrary.*;
 import sun.hotspot.WhiteBox;
 
 public class MallocSiteHashOverflow {
-    private static long K = 1024;
+
     public static void main(String args[]) throws Exception {
-        String vm_name = System.getProperty("java.vm.name");
 
+        // Size of entries based on malloc tracking header defined in mallocTracker.hpp
         // For 32-bit systems, create 257 malloc sites with the same hash bucket to overflow a hash bucket
-        // For 64-bit systems, create 64K + 1 malloc sites with the same hash bucket to overflow a hash bucket
         long entries = 257;
-        if (Platform.is64bit()) {
-            entries = 64 * K + 1;
-        }
 
         OutputAnalyzer output;
         WhiteBox wb = WhiteBox.getWhiteBox();
+        int MAX_HASH_SIZE = wb.NMTGetHashSize();
 
         // Grab my own PID
         String pid = Integer.toString(ProcessTools.getProcessId());
         ProcessBuilder pb = new ProcessBuilder();
 
-        wb.NMTOverflowHashBucket(entries);
-
-        // Run 'jcmd <pid> VM.native_memory summary'
+        // Verify that current tracking level is "detail"
         pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
         output = new OutputAnalyzer(pb.start());
-        output.shouldContain("Tracking level has been downgraded due to lack of resources");
+        output.shouldContain("Native Memory Tracking Statistics");
+
+        // Attempt to cause NMT to downgrade tracking level by allocating small amounts
+        // of memory with random pseudo call stack
+        int pc = 1;
+        for (int i = 0; i < entries; i++) {
+            long addr = wb.NMTMallocWithPseudoStack(1, pc);
+            if (addr == 0) {
+                throw new RuntimeException("NMTMallocWithPseudoStack: out of memory");
+            }
+            // We free memory here since it doesn't affect pseudo malloc alloc site hash table entries
+            wb.NMTFree(addr);
+            pc += MAX_HASH_SIZE;
+            if (i == entries - 1) {
+                // Verify that tracking has been downgraded
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain("Tracking level has been downgraded due to lack of resources");
+            }
+        }
     }
 }
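The rewritten test leans on a simple property: if malloc sites are bucketed by something like hash(pc) modulo the bucket count, then pseudo PCs stepped by exactly the bucket count all collide into one bucket. A throwaway sketch of that arithmetic (the bucket count and hash are assumptions, not the NMT implementation):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t buckets = 511;            // assumed table size, not NMT's real one
      uint64_t pc = 1;
      const uint64_t first_bucket = pc % buckets;
      for (int i = 0; i < 257; i++) {
        assert(pc % buckets == first_bucket);  // every pseudo call site hits the same bucket
        pc += buckets;                         // step by the bucket count, as the test does
      }
      return 0;
    }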
--- a/test/runtime/NMT/UnsafeMallocLimit.java	Wed Nov 12 13:47:19 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8055289
- * @library /testlibrary
- * @build UnsafeMallocLimit
- * @run main/othervm -Xmx32m -XX:NativeMemoryTracking=summary UnsafeMallocLimit
- */
-
-import com.oracle.java.testlibrary.*;
-import sun.misc.Unsafe;
-
-public class UnsafeMallocLimit {
-
-    public static void main(String args[]) throws Exception {
-        if (Platform.is32bit()) {
-            Unsafe unsafe = Utils.getUnsafe();
-            try {
-                unsafe.allocateMemory(1 << 30);
-                throw new RuntimeException("Did not get expected OOME");
-            } catch (OutOfMemoryError e) {
-                // Expected exception
-            }
-        } else {
-            System.out.println("Test only valid on 32-bit platforms");
-        }
-    }
-}
--- a/test/runtime/NMT/UnsafeMallocLimit2.java	Wed Nov 12 13:47:19 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8058818
- * @library /testlibrary
- * @build UnsafeMallocLimit2
- * @run main/othervm -Xmx32m -XX:NativeMemoryTracking=off UnsafeMallocLimit2
- */
-
-import com.oracle.java.testlibrary.*;
-import sun.misc.Unsafe;
-
-public class UnsafeMallocLimit2 {
-
-    public static void main(String args[]) throws Exception {
-        if (Platform.is32bit()) {
-            Unsafe unsafe = Utils.getUnsafe();
-            try {
-                // Allocate greater than MALLOC_MAX and likely won't fail to allocate,
-                // so it hits the NMT code that asserted.
-                // Test that this doesn't cause an assertion with NMT off.
-                // The option above overrides if all the tests are run with NMT on.
-                unsafe.allocateMemory(0x40000000);
-                System.out.println("Allocation succeeded");
-            } catch (OutOfMemoryError e) {
-                System.out.println("Allocation failed");
-            }
-        } else {
-            System.out.println("Test only valid on 32-bit platforms");
-        }
-    }
-}
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Nov 12 13:47:19 2014 -0800
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Nov 14 07:37:00 2014 -0800
@@ -104,10 +104,10 @@
   public native void NMTCommitMemory(long addr, long size);
   public native void NMTUncommitMemory(long addr, long size);
   public native void NMTReleaseMemory(long addr, long size);
-  public native void NMTOverflowHashBucket(long num);
   public native long NMTMallocWithPseudoStack(long size, int index);
   public native boolean NMTIsDetailSupported();
   public native boolean NMTChangeTrackingLevel();
+  public native int NMTGetHashSize();
 
   // Compiler
   public native void    deoptimizeAll();
@@ -208,4 +208,13 @@
                        .findAny()
                        .orElse(null);
   }
+  public native int getOffsetForName0(String name);
+  public int getOffsetForName(String name) throws Exception {
+    int offset = getOffsetForName0(name);
+    if (offset == -1) {
+      throw new RuntimeException(name + " not found");
+    }
+    return offset;
+  }
+
 }