changeset 12116:1624a68007bd

Merge
author jmasa
date Tue, 27 Aug 2013 18:55:33 -0700
parents 21ffbaa691b5 (diff) ec145d04eda8 (current diff)
children 62f527c674d2 0d59407e7e09
files src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
diffstat 36 files changed, 1093 insertions(+), 391 deletions(-)
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Aug 23 15:59:20 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Tue Aug 27 18:55:33 2013 -0700
@@ -35,8 +35,9 @@
 sapkg.code = sapkg.hotspot.code;
 sapkg.compiler = sapkg.hotspot.compiler;
 
-// 'debugger' is a JavaScript keyword :-(
-// sapkg.debugger = sapkg.hotspot.debugger;
+// 'debugger' is a JavaScript keyword, but ES5 relaxes the
+// restriction on using keywords as property names
+sapkg.debugger = sapkg.hotspot.debugger;
 
 sapkg.interpreter = sapkg.hotspot.interpreter;
 sapkg.jdi = sapkg.hotspot.jdi;
@@ -116,27 +117,36 @@
       return args;
     }
 
+    // Handle __has__ specially to avoid metacircularity problems
+    // when called from __get__.
+    // Calling
+    //   this.__has__(name)
+    // will in turn call
+    //   this.__call__('__has__', name)
+    // which is not handled below
+    function __has__(name) {
+      if (typeof(name) == 'number') {
+        return so["has(int)"](name);
+      } else {
+        if (name == '__wrapped__') {
+          return true;
+        } else if (so["has(java.lang.String)"](name)) {
+          return true;
+        } else if (name.equals('toString')) {
+          return true;
+        } else {
+          return false;
+        }
+      }
+    }
+
     if (so instanceof sapkg.utilities.soql.ScriptObject) {
       return new JSAdapter() {
-        __getIds__: function() {                  
-          return so.getIds();         
+        __getIds__: function() {
+          return so.getIds();
         },
   
-        __has__ : function(name) {
-          if (typeof(name) == 'number') {
-            return so["has(int)"](name);
-          } else {
-            if (name == '__wrapped__') {
-              return true;
-            } else if (so["has(java.lang.String)"](name)) {
-              return true;
-            } else if (name.equals('toString')) {
-              return true;
-            } else {
-              return false;
-            }
-          }
-        },
+        __has__ : __has__,
   
         __delete__ : function(name) {
           if (typeof(name) == 'number') {
@@ -147,7 +157,8 @@
         },
   
         __get__ : function(name) {
-          if (! this.__has__(name)) {
+	      // don't call this.__has__(name); see comments above function __has__
+          if (! __has__.call(this, name)) {
             return undefined;
           }
           if (typeof(name) == 'number') {
@@ -162,7 +173,7 @@
                   var args = prepareArgsArray(arguments);
                   var r;
                   try {
-                    r = value.call(args);
+                    r = value.call(Java.to(args, 'java.lang.Object[]'));
                   } catch (e) {
                     println("call to " + name + " failed!");
                     throw e;
@@ -204,6 +215,18 @@
   }
 
   // define "writeln" and "write" if not defined
+  if (typeof(println) == 'undefined') {
+    println = function (str) {
+      java.lang.System.out.println(String(str));
+    }
+  }
+
+  if (typeof(print) == 'undefined') {
+    print = function (str) {
+      java.lang.System.out.print(String(str));
+    }
+  }
+
   if (typeof(writeln) == 'undefined') {
     writeln = println;
   }
@@ -235,7 +258,7 @@
 
     this.jclasses = function() {
       forEachKlass(function (clazz) {
-        writeln(clazz.getName().asString() + " @" + clazz.getHandle().toString()); 
+        writeln(clazz.getName().asString() + " @" + clazz.getAddress().toString()); 
       });
     }
     registerCommand("classes", "classes", "jclasses");
@@ -490,14 +513,14 @@
 function forEachKlass(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassVisitor)"](visitor);
 }
 
 // iterate system dictionary for each 'Klass' and initiating loader
 function forEachKlassAndLoader(callback) {
    var VisitorClass = sapkg.memory.SystemDictionary.ClassAndLoaderVisitor;
    var visitor = new VisitorClass() { visit: callback };
-   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassAndLoaderVisitor)"](visitor);
+   sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassAndLoaderVisitor)"](visitor);
 }
 
 // iterate system dictionary for each primitive array klass
@@ -522,7 +545,12 @@
 
 // iterates Java heap for each Oop
 function forEachOop(callback) {
-   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { doObj: callback });
+   function empty() { }
+   sa.objHeap.iterate(new sapkg.oops.HeapVisitor() {
+       prologue: empty,
+       doObj: callback,
+       epilogue: empty
+   });
 }
 
 // iterates Java heap for each Oop of given 'klass'.
@@ -536,8 +564,14 @@
    if (includeSubtypes == undefined) {
       includeSubtypes = true;
    }
+
+   function empty() { }
    sa.objHeap.iterateObjectsOfKlass(
-        new sapkg.oops.HeapVisitor() { doObj: callback },
+        new sapkg.oops.HeapVisitor() {
+            prologue: empty,
+            doObj: callback,
+            epilogue: empty
+        },
         klass, includeSubtypes);
 }
 
@@ -746,9 +780,9 @@
          // ignore;
          continue;
    } else {
-      // some type names have ':'. replace to make it as a 
+      // some type names have ':', '<', '>', '*', ' '. replace to make it as a
       // JavaScript identifier
-      tmp.name = tmp.name.replace(':', '_').replace('<', '_').replace('>', '_').replace('*', '_').replace(' ', '_');
+      tmp.name = ("" + tmp.name).replace(/[:<>* ]/g, '_');
       eval("function read" + tmp.name + "(addr) {" +
            "   return readVMType('" + tmp.name + "', addr);}"); 
       eval("function print" + tmp.name + "(addr) {" + 
--- a/make/linux/makefiles/amd64.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/linux/makefiles/amd64.make	Tue Aug 27 18:55:33 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,4 @@
 
 CFLAGS += -D_LP64=1
 
-# The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-ifndef USE_SUNCC
-  CFLAGS += -fno-omit-frame-pointer
-endif
-
 OPT_CFLAGS/compactingPermGenGen.o = -O1
--- a/make/linux/makefiles/gcc.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/linux/makefiles/gcc.make	Tue Aug 27 18:55:33 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -398,3 +398,10 @@
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
+
+# Stack walking in the JVM relies on frame pointer (%rbp) to walk thread stack.
+# Explicitly specify -fno-omit-frame-pointer because it is off by default
+# starting with gcc 4.6.
+ifndef USE_SUNCC
+  CFLAGS += -fno-omit-frame-pointer
+endif
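
The comment explains the why; for readers unfamiliar with frame-pointer walks, the sketch below shows the x86-64 chain that stack walking depends on. It is a hedged illustration (not HotSpot's actual walker) and assumes the conventional frame layout produced when compiling with -fno-omit-frame-pointer:

    #include <cstdio>

    // Conventional x86-64 frame layout with frame pointers enabled:
    // %rbp points at the saved caller %rbp; the return address sits
    // just above it.
    struct Frame {
      Frame* caller_fp;   // saved %rbp of the caller
      void*  return_pc;   // return address pushed by 'call'
    };

    void walk_stack() {
      Frame* fp = (Frame*) __builtin_frame_address(0);
      // Real walkers validate fp against the thread's stack bounds;
      // this sketch just bounds the depth.
      for (int depth = 0; fp != NULL && depth < 16; depth++) {
        printf("#%d pc=%p\n", depth, fp->return_pc);
        fp = fp->caller_fp;
      }
    }

    int main() { walk_stack(); return 0; }

With -fomit-frame-pointer (the gcc 4.6+ default at -O), %rbp becomes a scratch register and the caller_fp chain is garbage, which is exactly what this makefile change guards against.
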
--- a/make/windows/build_vm_def.sh	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/build_vm_def.sh	Tue Aug 27 18:55:33 2013 -0700
@@ -42,8 +42,6 @@
  MKS_HOME=`dirname "$SH"`
 fi
 
-echo "EXPORTS" > vm1.def
-
 AWK="$MKS_HOME/awk.exe"
 if [ ! -e $AWK ]; then
     AWK="$MKS_HOME/gawk.exe"
@@ -55,6 +53,22 @@
 RM="$MKS_HOME/rm.exe"
 DUMPBIN="link.exe /dump"
 
+if [ "$1" = "-nosa" ]; then
+    echo EXPORTS > vm.def
+    echo ""
+    echo "***"
+    echo "*** Not building SA: BUILD_WIN_SA != 1"
+    echo "*** C++ Vtables NOT included in vm.def"
+    echo "*** This jvm.dll will NOT work properly with SA."
+    echo "***"
+    echo "*** When in doubt, set BUILD_WIN_SA=1, clean and rebuild."
+    echo "***"
+    echo ""
+    exit
+fi
+
+echo "EXPORTS" > vm1.def
+
 # When called from IDE the first param should contain the link version, otherwise may be nill
 if [ "x$1" != "x" ]; then
 LD_VER="$1"
--- a/make/windows/makefiles/debug.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/makefiles/debug.make	Tue Aug 27 18:55:33 2013 -0700
@@ -49,9 +49,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/fastdebug.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/makefiles/fastdebug.make	Tue Aug 27 18:55:33 2013 -0700
@@ -48,9 +48,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/product.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/makefiles/product.make	Tue Aug 27 18:55:33 2013 -0700
@@ -51,9 +51,6 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
--- a/make/windows/makefiles/projectcreator.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/makefiles/projectcreator.make	Tue Aug 27 18:55:33 2013 -0700
@@ -92,6 +92,10 @@
         -disablePch        getThread_windows_$(Platform_arch).cpp \
         -disablePch_compiler2     opcodes.cpp
 
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
 # Common options for the IDE builds for c1, and c2
 ProjectCreatorIDEOptions=\
         $(ProjectCreatorIDEOptions) \
@@ -104,7 +108,7 @@
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
-        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
+        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(BUILD_VM_DEF_FLAG) $(LD_VER)" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
         -ignoreFile jvmtiEnvStub.cpp \
--- a/make/windows/makefiles/vm.make	Fri Aug 23 15:59:20 2013 -0700
+++ b/make/windows/makefiles/vm.make	Tue Aug 27 18:55:33 2013 -0700
@@ -393,3 +393,11 @@
 _build_pch_file.obj:
         @echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
         $(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
+
+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
+vm.def: $(Obj_Files)
+	sh $(WorkSpace)/make/windows/build_vm_def.sh $(BUILD_VM_DEF_FLAG)
+
--- a/src/os/bsd/vm/os_bsd.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -642,13 +642,14 @@
 #endif
 
 #ifdef __APPLE__
-static uint64_t locate_unique_thread_id() {
+static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
   // Additional thread_id used to correlate threads in SA
   thread_identifier_info_data_t     m_ident_info;
   mach_msg_type_number_t            count = THREAD_IDENTIFIER_INFO_COUNT;
 
-  thread_info(::mach_thread_self(), THREAD_IDENTIFIER_INFO,
+  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
               (thread_info_t) &m_ident_info, &count);
+
   return m_ident_info.thread_id;
 }
 #endif
@@ -679,9 +680,14 @@
   }
 
 #ifdef __APPLE__
-  // thread_id is mach thread on macos
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "thread id missing from pthreads");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "unique thread id was not found");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());
@@ -843,8 +849,14 @@
 
   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "just checking");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "just checking");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   osthread->set_thread_id(::pthread_self());
 #endif
@@ -1115,7 +1127,7 @@
 
 intx os::current_thread_id() {
 #ifdef __APPLE__
-  return (intx)::mach_thread_self();
+  return (intx)::pthread_mach_thread_np(::pthread_self());
 #else
   return (intx)::pthread_self();
 #endif
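
Background for this and the surrounding hunks: ::mach_thread_self() allocates a new send right on every call, and the caller must release it with mach_port_deallocate(), so calling it per query leaks Mach port rights. pthread_mach_thread_np() returns the port that pthreads already caches for the thread. A hedged sketch of the two variants:

    #include <mach/mach.h>
    #include <pthread.h>

    mach_port_t current_thread_port() {
      // Leaky variant: each call creates a new send right that must be
      // paired with mach_port_deallocate(mach_task_self(), port).
      //   mach_port_t port = ::mach_thread_self();

      // Variant used by the patch: returns the cached port, creating
      // no new right, so nothing needs to be released.
      return ::pthread_mach_thread_np(::pthread_self());
    }
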
@@ -2313,7 +2325,9 @@
 }
 
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -3275,11 +3289,15 @@
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
     }
@@ -4736,3 +4754,8 @@
   return n;
 }
 
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/os/linux/vm/globals_linux.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/linux/vm/globals_linux.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
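
The new flag selects Linux's transparent huge page mechanism: ordinary anonymous memory plus an madvise() hint, rather than the pre-reserved hugetlbfs pool. A minimal sketch of that pattern (illustration only, not the HotSpot code path):

    #include <sys/mman.h>
    #include <cstddef>

    // Minimal THP pattern: reserve ordinary anonymous memory, then ask
    // the kernel to back it with huge pages when it can.
    void* reserve_with_thp(size_t bytes) {
      void* p = ::mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        return NULL;
      }
      // Hint only: if THP is unavailable the mapping simply stays on
      // small pages, which is why no pre-flight pool setup is needed.
      ::madvise(p, bytes, MADV_HUGEPAGE);
      return p;
    }

This is also why the transparent_huge_pages_sanity_check added in os_linux.cpp below only needs a throwaway two-page probe mapping.
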
--- a/src/os/linux/vm/os_linux.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -2720,36 +2720,7 @@
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-//                          "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2774,7 +2745,7 @@
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2787,7 +2758,7 @@
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3157,11 +3128,31 @@
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                 -1, 0);
 
   if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
@@ -3182,12 +3173,10 @@
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
-  }
-
-  if (warn) {
+    munmap(p, page_size);
+  }
+
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
 
@@ -3235,82 +3224,114 @@
 
 static size_t _large_page_size = 0;
 
-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.
 
 #ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO
 
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
+  FILE *fp = fopen("/proc/meminfo", "r");
+  if (fp) {
+    while (!feof(fp)) {
+      int x = 0;
+      char buf[16];
+      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+          large_page_size = x * K;
+          break;
+        }
+      } else {
+        // skip to next line
+        for (;;) {
+          int ch = fgetc(fp);
+          if (ch == EOF || ch == (int)'\n') break;
         }
       }
-      fclose(fp);
     }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
+    fclose(fp);
+  }
+
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+        proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
+
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
     UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages          = Linux::setup_large_page_type(large_page_size);
 
   set_coredump_filter();
 }
@@ -3319,16 +3340,22 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   key_t key = IPC_PRIVATE;
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
@@ -3376,42 +3403,219 @@
      return NULL;
   }
 
-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
-  }
-
-  // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
 
   return addr;
 }
 
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end   = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+      result = ::mmap(lp_end, end - lp_end, prot,
+                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                      -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
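
A worked example of the split computed above, with a hypothetical reservation address and an assumed 2M large page size:

    #include <cstdint>
    #include <cstdio>

    static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
    static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

    int main() {
      const uint64_t M  = 1024 * 1024;
      const uint64_t lp = 2 * M;                  // assumed large page size
      uint64_t start = 0x7f0000123000;            // hypothetical reservation
      uint64_t end   = start + 5 * M;
      uint64_t lp_start = align_up(start, lp);    // 0x7f0000200000
      uint64_t lp_end   = align_down(end, lp);    // 0x7f0000600000
      // Layout remapped with MAP_FIXED:
      //   [start, lp_start)  small pages  (0xdd000 bytes)
      //   [lp_start, lp_end) huge pages   (4M = two 2M pages)
      //   [lp_end, end)      small pages  (0x23000 bytes)
      printf("%#llx %#llx\n", (unsigned long long)lp_start,
                              (unsigned long long)lp_end);
      return 0;
    }
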
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
-  }
+  }
+
+  return res;
 }
 
 size_t os::large_page_size() {
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }
 
 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }
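
The rationale in the rewritten comment can be made concrete. Under the usual Linux mmap semantics, a MAP_FIXED mapping first discards whatever occupied the range, so a huge-page commit that then fails leaves a hole; the THP commit path has no such failure mode. A sketch (illustration, not the VM's code):

    #include <sys/mman.h>
    #include <cstddef>

    void commit_sketch(size_t bytes) {
      // 1. Reserve without committing.
      void* res = ::mmap(NULL, bytes, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (res == MAP_FAILED) return;

      // 2a. THP commit: ordinary small-page mapping plus a hint. The
      //     hint can be ignored, but the reservation always survives.
      ::mmap(res, bytes, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      ::madvise(res, bytes, MADV_HUGEPAGE);

      // 2b. hugetlbfs commit (what the removed code attempted):
      //     MAP_FIXED discards the old range first; if the huge page
      //     pool is exhausted the mmap fails and the reservation is
      //     gone, free for another thread to map over -- hence
      //     can_commit_large_page_memory() is false for UseHugeTLBFS.
      // ::mmap(res, bytes, PROT_READ | PROT_WRITE,
      //        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_HUGETLB,
      //        -1, 0);
    }
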
 
 // Reserve memory at an arbitrary address, only if that area is
@@ -4563,21 +4767,23 @@
         UseNUMA = false;
       }
     }
-    // With SHM large pages we cannot uncommit a page, so there's not way
+    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
     // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
     // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && UseSHM) {
-      if (!FLAG_IS_DEFAULT(UseNUMA)) {
-        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+      if (FLAG_IS_DEFAULT(UseNUMA)) {
+        UseNUMA = false;
+      } else {
+        if (FLAG_IS_DEFAULT(UseLargePages) &&
+            FLAG_IS_DEFAULT(UseSHM) &&
+            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
           UseLargePages = false;
         } else {
-          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
           UseAdaptiveSizePolicy = false;
           UseAdaptiveNUMAChunkSizing = false;
         }
-      } else {
-        UseNUMA = false;
       }
     }
     if (!UseNUMA && ForceNUMA) {
@@ -5848,3 +6054,149 @@
 }
 
 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+        return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
--- a/src/os/linux/vm/os_linux.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/linux/vm/os_linux.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -32,6 +32,7 @@
 
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;
 
   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
 
+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
 
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
--- a/src/os/solaris/vm/os_solaris.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -3385,7 +3385,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -6601,3 +6601,9 @@
 
   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/os/windows/vm/os_windows.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -3156,7 +3156,12 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5638,3 +5643,9 @@
 }
 
 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -2008,10 +2008,12 @@
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2028,12 +2030,8 @@
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  // Since max_byte_size is aligned to the size of a heap region (checked
-  // above).
-  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-                                                 HeapRegion::GrainBytes);
+                                                 heap_alignment);
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap.  (I've actually seen this
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -313,7 +313,8 @@
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
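
The effect is easiest to see with concrete numbers (all three inputs assumed for illustration): the G1 heap alignment, and therefore the alignment passed to Universe::reserve_heap in g1CollectedHeap.cpp above, is now driven by the large page size whenever that is the largest constraint.

    #include <algorithm>
    #include <cstddef>

    int main() {
      const size_t K = 1024, M = K * K;
      // Assumed values for illustration:
      size_t min_alignment        = 1 * M;    // HeapRegion::GrainBytes
      size_t card_table_alignment = 512 * K;  // GenRemSet constraint
      size_t page_size            = 2 * M;    // os::large_page_size()
      size_t max_alignment = std::max(std::max(card_table_alignment,
                                               min_alignment), page_size);
      // max_alignment == 2M: the heap reservation must be large-page
      // aligned, not merely region (GrainBytes) aligned.
      return max_alignment == 2 * M ? 0 : 1;
    }
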
--- a/src/share/vm/memory/allocation.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/allocation.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -666,7 +666,7 @@
   NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
 
 #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
-  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
+  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
 
 #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
@@ -675,16 +675,16 @@
   (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
 #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
 
 #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
-   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
 
 #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   FreeHeap((char*)(old), memflags)
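
The parenthesization fix is not cosmetic: call sites pass expressions such as s_len + 1 as size (jni.cpp in this same changeset does exactly that), and without the parentheses operator precedence mis-scales the allocation. A small demonstration of the bug (BYTES_BAD/BYTES_OK are stand-in names):

    #include <cstdio>

    typedef unsigned short jchar;          // stand-in for the JNI type

    #define BYTES_BAD(type, size)  (size * sizeof(type))    // old macro shape
    #define BYTES_OK(type, size)   ((size) * sizeof(type))  // patched shape

    int main() {
      int s_len = 8;
      // BYTES_BAD: s_len + 1*sizeof(jchar) = 8 + 2   = 10 bytes (too small)
      // BYTES_OK : (s_len + 1)*sizeof(jchar) = 9 * 2 = 18 bytes (correct)
      printf("%zu vs %zu\n",
             BYTES_BAD(jchar, s_len + 1),
             BYTES_OK(jchar, s_len + 1));
      return 0;
    }
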
--- a/src/share/vm/memory/collectorPolicy.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -193,6 +193,8 @@
       alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
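
The assert documents an invariant of the lcm() call just above it: since lcm(a, b) is always a multiple of b, the result can never drop below the incoming minimum alignment. A self-contained check with assumed sizes (HotSpot's own lcm lives in globalDefinitions.hpp; a local helper is written out here):

    #include <cstddef>

    static size_t gcd(size_t a, size_t b) { return b == 0 ? a : gcd(b, a % b); }
    static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

    int main() {
      const size_t K = 1024, M = K * K;
      size_t alignment = 64 * K;            // assumed minimum alignment
      alignment = lcm(2 * M, alignment);    // assumed 2M large page size
      // 2M is a multiple of 64K, so the result is 2M and the assert
      // holds; even a page size that were not a multiple would satisfy
      // it, because lcm(a, b) >= b by construction.
      return alignment == 2 * M ? 0 : 1;
    }
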
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -95,13 +95,13 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
@@ -109,9 +109,11 @@
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, &total_reserved,
+  size_t heap_alignment = collector_policy()->max_alignment();
+
+  heap_address = allocate(heap_alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
 
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
 
-  if (UseLargePages) {
-    assert(total_reserved != 0, "total_reserved cannot be 0");
-    total_reserved = round_to(total_reserved, os::large_page_size());
-    if (total_reserved < os::large_page_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-  }
+  *_total_reserved = total_reserved;
+  *_n_covered_regions = n_covered_regions;
 
-      *_total_reserved = total_reserved;
-      *_n_covered_regions = n_covered_regions;
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
--- a/src/share/vm/memory/metaspace.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -345,7 +345,7 @@
 };
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
--- a/src/share/vm/memory/universe.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/universe.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -681,17 +681,23 @@
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;
 
     // If the total size is small enough to allow UnscaledNarrowOop then
     // just use UnscaledNarrowOop.
@@ -742,6 +748,8 @@
     }
   }
 #endif
+
+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
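
preferred_heap_base() now aligns HeapBaseMinAddress itself before using it. For power-of-two alignments, align_size_up() is the standard mask trick, sketched here with assumed values:

    #include <cstdint>
    #include <cstdio>

    // Power-of-two round-up, the usual trick behind align_size_up().
    static uint64_t align_size_up(uint64_t size, uint64_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uint64_t M = 1024 * 1024, G = 1024 * M;
      // Already aligned (e.g. a 2G HeapBaseMinAddress, 2M alignment): unchanged.
      printf("%llu\n", (unsigned long long)align_size_up(2 * G, 2 * M));
      // Misaligned by 4K: rounded up to the next 2M boundary.
      printf("%llu\n", (unsigned long long)align_size_up(2 * G + 4096, 2 * M));
      return 0;
    }
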
 
@@ -867,27 +875,33 @@
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  assert(!UseLargePages
+      || UseParallelOldGC
+      || use_large_pages, "Wrong alignment to use large pages");
+
+  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !total_rs.is_reserved()) {
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserver heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 
       ReservedHeapSpace total_rs0(total_reserved, alignment,
-                                  UseLargePages, addr);
+          use_large_pages, addr);
 
       if (addr != NULL && !total_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
 
         ReservedHeapSpace total_rs1(total_reserved, alignment,
-                                    UseLargePages, addr);
+            use_large_pages, addr);
         total_rs = total_rs1;
       } else {
         total_rs = total_rs0;
--- a/src/share/vm/memory/universe.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/memory/universe.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -346,7 +346,7 @@
   };
   static NARROW_OOP_MODE narrow_oop_mode();
   static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
-  static char*    preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
+  static char*    preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
   static char*    preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
   static address  narrow_oop_base()                       { return  _narrow_oop._base; }
   static bool  is_narrow_oop_base(void* addr)             { return (narrow_oop_base() == (address)addr); }
--- a/src/share/vm/prims/jni.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/prims/jni.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -3234,19 +3234,22 @@
  HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
                                   env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
-  //%note jni_5
-  if (isCopy != NULL) {
-    *isCopy = JNI_TRUE;
-  }
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
   typeArrayOop s_value = java_lang_String::value(s);
   int s_offset = java_lang_String::offset(s);
-  jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal);  // add one for zero termination
-  if (s_len > 0) {
-    memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+  jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
+  /* JNI Specification states return NULL on OOM */
+  if (buf != NULL) {
+    if (s_len > 0) {
+      memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+    }
+    buf[s_len] = 0;
+    //%note jni_5
+    if (isCopy != NULL) {
+      *isCopy = JNI_TRUE;
+    }
   }
-  buf[s_len] = 0;
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf);
 #else /* USDT2 */
@@ -3335,9 +3338,14 @@
 #endif /* USDT2 */
   oop java_string = JNIHandles::resolve_non_null(string);
   size_t length = java_lang_String::utf8_length(java_string);
-  char* result = AllocateHeap(length + 1, mtInternal);
-  java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
-  if (isCopy != NULL) *isCopy = JNI_TRUE;
+  /* JNI Specification states return NULL on OOM */
+  char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+  if (result != NULL) {
+    java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
+    if (isCopy != NULL) {
+      *isCopy = JNI_TRUE;
+    }
+  }
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result);
 #else /* USDT2 */
@@ -3591,11 +3599,16 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
-    /* copy the array to the c chunk */ \
-    memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+    /* JNI Specification states return NULL on OOM */                    \
+    result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+    if (result != NULL) {                                                \
+      /* copy the array to the c chunk */                                \
+      memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len);      \
+      if (isCopy) {                                                      \
+        *isCopy = JNI_TRUE;                                              \
+      }                                                                  \
+    }                                                                    \
   } \
-  if (isCopy) *isCopy = JNI_TRUE; \
   DTRACE_PROBE1(hotspot_jni, Get##Result##ArrayElements__return, result);\
   return result; \
 JNI_END
@@ -3628,11 +3641,16 @@
      * Avoid asserts in typeArrayOop. */ \
     result = (ElementType*)get_bad_address(); \
   } else { \
-    result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
-    /* copy the array to the c chunk */ \
-    memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+    /* JNI Specification states return NULL on OOM */                    \
+    result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+    if (result != NULL) {                                                \
+      /* copy the array to the c chunk */                                \
+      memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len);      \
+      if (isCopy) {                                                      \
+        *isCopy = JNI_TRUE;                                              \
+      }                                                                  \
+    }                                                                    \
   } \
-  if (isCopy) *isCopy = JNI_TRUE; \
   ReturnProbe; \
   return result; \
 JNI_END
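
The same NULL-on-OOM contract now holds for the whole Get<PrimitiveType>ArrayElements family, and *isCopy is likewise only set when the copy succeeded. A hedged caller-side sketch (plain JNI; the helper name is hypothetical):

    #include <jni.h>

    // Sums a Java int[], treating a NULL result as allocation failure.
    static jlong sum_ints_checked(JNIEnv* env, jintArray arr) {
      jint* elems = env->GetIntArrayElements(arr, NULL);
      if (elems == NULL) {
        return 0;  // NULL on failure, per the JNI specification
      }
      jlong sum = 0;
      jsize n = env->GetArrayLength(arr);
      for (jsize i = 0; i < n; i++) {
        sum += elems[i];
      }
      env->ReleaseIntArrayElements(arr, elems, JNI_ABORT);  // read-only: no write-back
      return sum;
    }
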
@@ -5027,9 +5045,15 @@
   tty->print_cr("Running test: " #unit_test_function_call); \
   unit_test_function_call
 
+// Forward declaration
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestReservedSpace_test());
+    run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
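
The wiring pattern for an internal VM test is all visible here: implement a void function next to the code under test, forward-declare it in jni.cpp, and add a run_unit_test() line. A hedged sketch with a hypothetical test name:

    // In the file under test (HotSpot-internal, debug builds only):
    #ifndef PRODUCT
    void TestSomething_test() {
      assert(1 + 1 == 2, "sanity");
    }
    #endif

    // In jni.cpp, beside the other forward declarations:
    void TestSomething_test();
    // ...and inside execute_internal_vm_tests():
    run_unit_test(TestSomething_test());
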
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -1554,18 +1554,22 @@
     return false;
   }
 
-  // rewrite sourc file name index:
+  // rewrite source file name index:
   u2 source_file_name_idx = scratch_class->source_file_name_index();
   if (source_file_name_idx != 0) {
     u2 new_source_file_name_idx = find_new_index(source_file_name_idx);
-    scratch_class->set_source_file_name_index(new_source_file_name_idx);
+    if (new_source_file_name_idx != 0) {
+      scratch_class->set_source_file_name_index(new_source_file_name_idx);
+    }
   }
 
   // rewrite class generic signature index:
   u2 generic_signature_index = scratch_class->generic_signature_index();
   if (generic_signature_index != 0) {
     u2 new_generic_signature_index = find_new_index(generic_signature_index);
-    scratch_class->set_generic_signature_index(new_generic_signature_index);
+    if (new_generic_signature_index != 0) {
+      scratch_class->set_generic_signature_index(new_generic_signature_index);
+    }
   }
 
   return true;
@@ -1737,7 +1741,10 @@
 
     for (int i = 0; i < len; i++) {
       const u2 cp_index = elem[i].name_cp_index;
-      elem[i].name_cp_index = find_new_index(cp_index);
+      const u2 new_cp_index = find_new_index(cp_index);
+      if (new_cp_index != 0) {
+        elem[i].name_cp_index = new_cp_index;
+      }
     }
   }
 } // end rewrite_cp_refs_in_method()
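
All three hunks in this file apply the same defensive rule: find_new_index() returns 0 when the constant-pool entry was not remapped, and 0 is never a valid replacement, so the old index is kept in that case. The guard in generic form (illustrative names):

    u2 new_idx = find_new_index(old_idx);
    if (new_idx != 0) {   // 0 means "no mapping"; keep the old index
      set_index(new_idx);
    }
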
--- a/src/share/vm/runtime/globals.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -1933,6 +1933,9 @@
   notproduct(bool, ExecuteInternalVMTests, false,                           \
           "Enable execution of internal VM tests.")                         \
                                                                             \
+  notproduct(bool, VerboseInternalVMTests, false,                           \
+          "Turn on logging for internal VM tests.")                         \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
--- a/src/share/vm/runtime/mutexLocker.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -124,13 +124,15 @@
 
 Mutex*   Management_lock              = NULL;
 Monitor* Service_lock                 = NULL;
-Mutex*   Stacktrace_lock              = NULL;
+Monitor* PeriodicTask_lock            = NULL;
 
-Monitor* JfrQuery_lock                = NULL;
+#ifdef INCLUDE_TRACE
+Mutex*   JfrStacktrace_lock           = NULL;
 Monitor* JfrMsg_lock                  = NULL;
 Mutex*   JfrBuffer_lock               = NULL;
 Mutex*   JfrStream_lock               = NULL;
-Monitor* PeriodicTask_lock            = NULL;
+Mutex*   JfrThreadGroups_lock         = NULL;
+#endif
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -206,7 +208,6 @@
   def(Patching_lock                , Mutex  , special,     true ); // used for safepointing and code patching.
   def(ObjAllocPost_lock            , Monitor, special,     false);
   def(Service_lock                 , Monitor, special,     true ); // used for service thread operations
-  def(Stacktrace_lock              , Mutex,   special,     true ); // used for JFR stacktrace database
   def(JmethodIdCreation_lock       , Mutex  , leaf,        true ); // used for creating jmethodIDs.
 
   def(SystemDictionary_lock        , Monitor, leaf,        true ); // lookups done by VM thread
@@ -272,11 +273,16 @@
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
   def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
+  def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
 
+#ifdef INCLUDE_TRACE
   def(JfrMsg_lock                  , Monitor, leaf,        true);
   def(JfrBuffer_lock               , Mutex,   nonleaf+1,   true);
+  def(JfrThreadGroups_lock         , Mutex,   nonleaf+1,   true);
   def(JfrStream_lock               , Mutex,   nonleaf+2,   true);
-  def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
+  def(JfrStacktrace_lock           , Mutex,   special,     true );
+#endif
+
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/src/share/vm/runtime/mutexLocker.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/mutexLocker.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -137,13 +137,15 @@
 
 extern Mutex*   Management_lock;                 // a lock used to serialize JVM management
 extern Monitor* Service_lock;                    // a lock used for service thread operation
-extern Mutex*   Stacktrace_lock;                 // used to guard access to the stacktrace table
+extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
 
-extern Monitor* JfrQuery_lock;                   // protects JFR use
+#ifdef INCLUDE_TRACE
+extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
 extern Monitor* JfrMsg_lock;                     // protects JFR messaging
 extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
 extern Mutex*   JfrStream_lock;                  // protects JFR stream access
-extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
+extern Mutex*   JfrThreadGroups_lock;            // protects JFR access to Thread Groups
+#endif
 
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
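
The comment above (cut off by the diff context) describes the RAII discipline these declarations rely on; for instance, the PeriodicTask_lock moved here would typically be taken for a scope like this (HotSpot-internal sketch, not standalone code):

    {
      MutexLocker ml(PeriodicTask_lock);  // acquired on construction
      // ... inspect or mutate the periodic task list ...
    }                                     // released when ml leaves scope
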
--- a/src/share/vm/runtime/os.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/os.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -328,8 +328,8 @@
 
   static char*  non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char*  reserve_memory_special(size_t size, char* addr = NULL,
-                bool executable = false);
+  static char*  reserve_memory_special(size_t size, size_t alignment,
+                                       char* addr, bool executable);
   static bool   release_memory_special(char* addr, size_t bytes);
   static void   large_page_init();
   static size_t large_page_size();
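
Dropping the default arguments forces every caller to state the alignment and executability explicitly. A hedged call-site sketch (HotSpot-internal; assumes large pages are configured, and the reservation may still return NULL):

    size_t size      = os::large_page_size();
    size_t alignment = os::large_page_size();
    char* base = os::reserve_memory_special(size, alignment,
                                            NULL /* addr */, false /* executable */);
    if (base != NULL) {
      // ... use the pinned, large-page-backed region ...
      os::release_memory_special(base, size);
    }
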
--- a/src/share/vm/runtime/virtualspace.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -42,8 +42,19 @@
 
 
 // ReservedSpace
+
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+    _alignment(0), _special(false), _executable(false) {
+}
+
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL, 0, false);
+  size_t page_size = os::page_size_for_region(size, size, 1);
+  bool large_pages = page_size != (size_t)os::vm_page_size();
+  // Don't force the alignment to be large page aligned,
+  // since that will waste memory.
+  size_t alignment = os::vm_allocation_granularity();
+  initialize(size, alignment, large_pages, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
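
The no-arg constructor exists so a ReservedSpace can be declared first and assigned later, exactly as universe.cpp does above with total_rs; the one-argument constructor now picks a page size by itself. A hedged sketch (HotSpot-internal, illustrative only; M is HotSpot's megabyte constant):

    ReservedSpace total_rs;      // dummy-constructed, nothing reserved yet
    ReservedSpace rs0(2 * M);    // may transparently use large pages
    if (rs0.is_reserved()) {
      total_rs = rs0;            // plain member-wise copy assignment
    }
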
@@ -129,16 +140,18 @@
 
   if (special) {
 
-    base = os::reserve_memory_special(size, requested_address, executable);
+    base = os::reserve_memory_special(size, alignment, requested_address, executable);
 
     if (base != NULL) {
       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
         // OS ignored requested address. Try different address.
         return;
       }
-      // Check alignment constraints
+      // Check alignment constraints.
       assert((uintptr_t) base % alignment == 0,
-             "Large pages returned a non-aligned address");
+             err_msg("Large pages returned a non-aligned address, base: "
+                 PTR_FORMAT " alignment: " PTR_FORMAT,
+                 base, (void*)(uintptr_t)alignment));
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -715,4 +728,188 @@
   tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 }
 
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void release_memory_for_test(ReservedSpace rs) {
+    if (rs.special()) {
+      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+    } else {
+      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+    }
+  }
+
+  static void test_reserved_space1(size_t size, size_t alignment) {
+    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+    ReservedSpace rs(size,          // size
+                     alignment,     // alignment
+                     UseLargePages, // large
+                     NULL,          // requested_address
+                     0);            // noaccess_prefix
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned sizes");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space2(size_t size) {
+    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+    ReservedSpace rs(size);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+    test_log("test_reserved_space3(%p, %p, %d)",
+        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+    ReservedSpace rs(size, alignment, large, false);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+
+  static void test_reserved_space1() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag   = os::vm_allocation_granularity();
+
+    test_reserved_space1(size,      ag);
+    test_reserved_space1(size * 2,  ag);
+    test_reserved_space1(size * 10, ag);
+  }
+
+  static void test_reserved_space2() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space2(size * 1);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(ag);
+    test_reserved_space2(size - ag);
+    test_reserved_space2(size);
+    test_reserved_space2(size + ag);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 2 - ag);
+    test_reserved_space2(size * 2 + ag);
+    test_reserved_space2(size * 3);
+    test_reserved_space2(size * 3 - ag);
+    test_reserved_space2(size * 3 + ag);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(size * 10 + size / 2);
+  }
+
+  static void test_reserved_space3() {
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space3(ag,      ag    , false);
+    test_reserved_space3(ag * 2,  ag    , false);
+    test_reserved_space3(ag * 3,  ag    , false);
+    test_reserved_space3(ag * 2,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 2, false);
+    test_reserved_space3(ag * 8,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 4, false);
+    test_reserved_space3(ag * 8,  ag * 4, false);
+    test_reserved_space3(ag * 16, ag * 4, false);
+
+    if (UseLargePages) {
+      size_t lp = os::large_page_size();
+
+      // Without large pages
+      test_reserved_space3(lp,     ag * 4, false);
+      test_reserved_space3(lp * 2, ag * 4, false);
+      test_reserved_space3(lp * 4, ag * 4, false);
+      test_reserved_space3(lp,     lp    , false);
+      test_reserved_space3(lp * 2, lp    , false);
+      test_reserved_space3(lp * 3, lp    , false);
+      test_reserved_space3(lp * 2, lp * 2, false);
+      test_reserved_space3(lp * 4, lp * 2, false);
+      test_reserved_space3(lp * 8, lp * 2, false);
+
+      // With large pages
+      test_reserved_space3(lp,     ag * 4, true);
+      test_reserved_space3(lp * 2, ag * 4, true);
+      test_reserved_space3(lp * 4, ag * 4, true);
+      test_reserved_space3(lp,     lp    , true);
+      test_reserved_space3(lp * 2, lp    , true);
+      test_reserved_space3(lp * 3, lp    , true);
+      test_reserved_space3(lp * 2, lp * 2, true);
+      test_reserved_space3(lp * 4, lp * 2, true);
+      test_reserved_space3(lp * 8, lp * 2, true);
+    }
+  }
+
+  static void test_reserved_space() {
+    test_reserved_space1();
+    test_reserved_space2();
+    test_reserved_space3();
+  }
+};
+
+void TestReservedSpace_test() {
+  TestReservedSpace::test_reserved_space();
+}
+
+#endif // PRODUCT
+
 #endif
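
Both flags involved are notproduct, so this only works on a debug build; there, ExecuteInternalVMTests runs the tests during VM startup and the new VerboseInternalVMTests makes test_log() actually print. Any trivial launch suffices, since the tests run at initialization, for example:

    java -XX:+ExecuteInternalVMTests -XX:+VerboseInternalVMTests -version
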
--- a/src/share/vm/runtime/virtualspace.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -53,6 +53,7 @@
 
  public:
   // Constructor
+  ReservedSpace();
   ReservedSpace(size_t size);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
--- a/src/share/vm/services/management.cpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/services/management.cpp	Tue Aug 27 18:55:33 2013 -0700
@@ -876,8 +876,6 @@
       total_used += u.used();
       total_committed += u.committed();
 
-      // if any one of the memory pool has undefined init_size or max_size,
-      // set it to -1
       if (u.init_size() == (size_t)-1) {
         has_undefined_init_size = true;
       }
@@ -894,6 +892,15 @@
     }
   }
 
+  // If any one of the memory pools has an undefined init_size or max_size,
+  // set the corresponding total to -1.
+  if (has_undefined_init_size) {
+    total_init = (size_t)-1;
+  }
+  if (has_undefined_max_size) {
+    total_max = (size_t)-1;
+  }
+
   MemoryUsage usage((heap ? InitialHeapSize : total_init),
                     total_used,
                     total_committed,
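
Moving the fold out of the loop makes it happen exactly once: as soon as any pool reports an undefined size, the aggregate is pinned to the (size_t)-1 sentinel, which the management API surfaces as -1. A standalone illustration of the sentinel (assumes the usual two's-complement platforms where ptrdiff_t has the same width as size_t):

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t undefined = (size_t)-1;          // the sentinel assigned above
      // Viewed through a same-width signed type it reads back as -1,
      // which is what MemoryUsage reports for "undefined".
      printf("%td\n", (ptrdiff_t)undefined);  // prints -1
      return 0;
    }
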
--- a/src/share/vm/services/memTracker.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/services/memTracker.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -87,6 +87,8 @@
         MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
    static inline void record_virtual_memory_commit(address addr, size_t size,
         address pc = 0, Thread* thread = NULL) { }
+   static inline void record_virtual_memory_release(address addr, size_t size,
+        Thread* thread = NULL) { }
    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
         Thread* thread = NULL) { }
    static inline Tracker get_realloc_tracker() { return _tkr; }
@@ -372,6 +374,13 @@
     tkr.record(addr, size, flags, pc);
   }
 
+  static inline void record_virtual_memory_release(address addr, size_t size,
+      Thread* thread = NULL) {
+    if (is_on()) {
+      Tracker tkr(Tracker::Release, thread);
+      tkr.record(addr, size);
+    }
+  }
 
   // record memory type on virtual memory base address
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
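
The empty inline keeps builds without NMT compiling; the real variant records a Release event when tracking is on. A hedged sketch of an intended call site (HotSpot-internal, illustrative only, not the literal unmap path):

    if (os::release_memory(addr, size)) {
      MemTracker::record_virtual_memory_release((address)addr, size);
    }
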
--- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 23 15:59:20 2013 -0700
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Aug 27 18:55:33 2013 -0700
@@ -402,6 +402,14 @@
 
 #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
 
+inline bool is_size_aligned(size_t size, size_t alignment) {
+  return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+  return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
 inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
   return align_size_up_(size, alignment);
 }
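
is_size_aligned() and is_ptr_aligned() are thin wrappers over align_size_up_, which is only correct for power-of-two alignments. A standalone worked example (the macro body is copied from above):

    #include <cassert>
    #include <cstddef>

    #define align_size_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((alignment) - 1))

    inline bool is_size_aligned(size_t size, size_t alignment) {
      return align_size_up_(size, alignment) == size;
    }

    int main() {
      // align_size_up_(13, 8) = (13 + 7) & ~7 = 20 & ~7 = 16
      assert(align_size_up_((size_t)13, (size_t)8) == 16);
      assert(!is_size_aligned(13, 8));
      assert( is_size_aligned(16, 8));
      return 0;
    }
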
@@ -414,6 +422,14 @@
 
 #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
 
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+  return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+  return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
 // Align objects by rounding up their size, in HeapWord units.
 
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
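
align_ptr_up()/align_ptr_down() round pointers through intptr_t, so the same power-of-two caveat applies. A standalone sketch of the round-up case (the body mirrors the inline above):

    #include <cassert>
    #include <cstdint>

    inline void* align_ptr_up(void* ptr, size_t alignment) {
      intptr_t a = (intptr_t)alignment;
      return (void*)(((intptr_t)ptr + (a - 1)) & ~(a - 1));
    }

    int main() {
      char buf[64];
      void* p  = buf + 1;               // very likely not 16-byte aligned
      void* up = align_ptr_up(p, 16);
      assert((uintptr_t)up % 16 == 0);  // rounded up to a 16-byte boundary
      assert((char*)up >= (char*)p && (char*)up < (char*)p + 16);
      return 0;
    }
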
--- a/test/runtime/7051189/Xchecksig.sh	Fri Aug 23 15:59:20 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-# 
-#  Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
-#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-#  This code is free software; you can redistribute it and/or modify it
-#  under the terms of the GNU General Public License version 2 only, as
-#  published by the Free Software Foundation.
-# 
-#  This code is distributed in the hope that it will be useful, but WITHOUT
-#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-#  version 2 for more details (a copy is included in the LICENSE file that
-#  accompanied this code).
-# 
-#  You should have received a copy of the GNU General Public License version
-#  2 along with this work; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-#  or visit www.oracle.com if you need additional information or have any
-#  questions.
-# 
-
- 
-# @test Xchecksig.sh
-# @bug 7051189
-# @summary Need to suppress info message if -xcheck:jni used with libjsig.so
-# @run shell Xchecksig.sh
-#
-
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-OS=`uname -s`
-case "$OS" in
-  Windows_* | CYGWIN_* )
-    printf "Not testing libjsig.so on Windows. PASSED.\n "
-    exit 0
-    ;;
-esac
-
-JAVA=${TESTJAVA}${FS}bin${FS}java
-
-# LD_PRELOAD arch needs to match the binary we run, so run the java
-# 64-bit binary directly if we are testing 64-bit (bin/ARCH/java).
-# Check if TESTVMOPS contains -d64, but cannot use 
-# java ${TESTVMOPS} to run "java -d64"  with LD_PRELOAD.
-
-if [ ${OS} -eq "SunOS" ]
-then
-  printf  "SunOS test TESTVMOPTS = ${TESTVMOPTS}"
-  printf ${TESTVMOPTS} | grep d64 > /dev/null
-  if [ $? -eq 0 ]
-  then
-    printf "SunOS 64-bit test\n"
-    BIT_FLAG=-d64
-  fi
-fi
-
-ARCH=`uname -p`
-case $ARCH in
-  i386)
-    if [ X${BIT_FLAG} != "X" ]
-    then
-      ARCH=amd64
-      JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java
-    fi
-    ;;
-  sparc)
-    if [ X${BIT_FLAG} != "X" ]
-    then
-      ARCH=sparcv9
-      JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java
-    fi
-    ;;
-  * )
-    printf "Not testing architecture $ARCH, skipping test.\n"
-    exit 0
-  ;; 
-esac
-
-LIBJSIG=${COMPILEJAVA}${FS}jre${FS}lib${FS}${ARCH}${FS}libjsig.so
-
-# If libjsig and binary do not match, skip test.
-
-A=`file ${LIBJSIG} | awk '{ print $3 }'`
-B=`file ${JAVA}    | awk '{ print $3 }'`
-
-if [ $A -ne $B ]
-then
-  printf "Mismatching binary and library to preload, skipping test.\n"
-  exit 0
-fi
-
-if [ ! -f ${LIBJSIG} ]
-then
-  printf "Skipping test: libjsig missing for given architecture: ${LIBJSIG}\n"
-  exit 0
-fi
-# Use java -version to test, java version info appears on stderr,
-# the libjsig message we are removing appears on stdout.
-
-# grep returns zero meaning found, non-zero means not found:
-
-LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -version 2>&1  | grep "libjsig is activated"
-if [ $? -eq 0 ]; then
-  printf "Failed: -Xcheck:jni prints message when libjsig.so is loaded.\n"
-  exit 1
-fi
-
-
-LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -verbose:jni -version 2>&1 | grep "libjsig is activated"
-if [ $? != 0 ]; then
-  printf "Failed: -Xcheck:jni does not print message when libjsig.so is loaded and -verbose:jni is set.\n"
-  exit 1
-fi
-
-printf "PASSED\n"
-exit 0
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/XCheckJniJsig/XCheckJSig.java	Tue Aug 27 18:55:33 2013 -0700
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7051189 8023393
+ * @summary Need to suppress info message if -Xcheck:jni is used with libjsig.so
+ * @library /testlibrary
+ * @run main XCheckJSig
+ */
+
+import java.util.*;
+import com.oracle.java.testlibrary.*;
+
+public class XCheckJSig {
+    public static void main(String args[]) throws Throwable {
+
+        System.out.println("Regression test for bugs 7051189 and 8023393");
+        if (!Platform.isSolaris() && !Platform.isLinux() && !Platform.isOSX()) {
+            System.out.println("Test only applicable on Solaris, Linux, and Mac OSX, skipping");
+            return;
+        }
+
+        String jdk_path = System.getProperty("test.jdk");
+        String os_arch = Platform.getOsArch();
+        String libjsig;
+        String env_var;
+        if (Platform.isOSX()) {
+            libjsig = jdk_path + "/jre/lib/server/libjsig.dylib";
+            env_var = "DYLD_INSERT_LIBRARIES";
+        } else {
+            libjsig = jdk_path + "/jre/lib/" + os_arch + "/libjsig.so";
+            env_var = "LD_PRELOAD";
+        }
+        String java_program;
+        if (Platform.isSolaris()) {
+            // On Solaris, need to call the 64-bit Java directly in order for
+            // LD_PRELOAD to work because libjsig.so is 64-bit.
+            java_program = jdk_path + "/jre/bin/" + os_arch + "/java";
+        } else {
+            java_program = JDKToolFinder.getJDKTool("java");
+        }
+        // If this test fails, these might be useful to know.
+        System.out.println("libjsig: " + libjsig);
+        System.out.println("osArch: " + os_arch);
+        System.out.println("java_program: " + java_program);
+
+        ProcessBuilder pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-version");
+        Map<String, String> env = pb.environment();
+        env.put(env_var, libjsig);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+
+        pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-verbose:jni", "-version");
+        env = pb.environment();
+        env.put(env_var, libjsig);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+    }
+}